```
github.com/pingcap/tidb/planner.optimize at optimize.go:335
github.com/pingcap/tidb/planner.Optimize at optimize.go:211
github.com/pingcap/tidb/executor.(*Compiler).Compile at compiler.go:77
github.com/pingcap/tidb/session.(*session).ExecuteStmt at session.go:1696
github.com/pingcap/tidb/server.(*TiDBContext).ExecuteStmt at driver_tidb.go:220
github.com/pingcap/tidb/server.(*clientConn).handleStmt at conn.go:1977
github.com/pingcap/tidb/server.(*clientConn).handleQuery at conn.go:1846
github.com/pingcap/tidb/server.(*clientConn).dispatch at conn.go:1341
github.com/pingcap/tidb/server.(*clientConn).Run at conn.go:1091
github.com/pingcap/tidb/server.(*Server).onConn at server.go:556
runtime.goexit at asm_amd64.s:1371
- Async stack trace
github.com/pingcap/tidb/server.(*Server).startNetworkListener at server.go:453
```
The stack trace above shows the basic execution flow of a query. Let's follow it frame by frame and analyze each step.
github.com/pingcap/tidb/server.(*Server).onConn at server.go (connection handling logic)
```go
conn.Run(ctx)
```
Here we can see that execution enters the Run method of a clientConn.
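As a mental model of how the server hands each accepted connection to its own Run loop, here is a minimal sketch of the accept-loop / per-connection-goroutine pattern. It is not the actual TiDB implementation; the type and function bodies below are illustrative only, and only the shape (Server.onConn spawning a goroutine that calls clientConn.Run) mirrors the stack trace above.

```go
package main

import (
	"context"
	"log"
	"net"
)

// clientConn is a simplified stand-in for TiDB's clientConn.
type clientConn struct {
	conn net.Conn
}

// Run reads requests and writes responses until the connection ends.
func (cc *clientConn) Run(ctx context.Context) {
	// ... read packet, dispatch, write result, repeat ...
}

func main() {
	ln, err := net.Listen("tcp", ":4000") // 4000 is TiDB's default SQL port
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			continue
		}
		// One goroutine per client connection, mirroring Server.onConn -> clientConn.Run.
		go (&clientConn{conn: conn}).Run(context.Background())
	}
}
```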
```go
// Run reads client query and writes query result to client in for loop, if there is a panic during query handling,
// it will be recovered and log the panic error.
// This function returns and the connection is closed if there is an IO error or there is a panic.
func (cc *clientConn) Run(ctx context.Context)
```
```go
// Usually, client connection status changes between [dispatching] <=> [reading].
// When some event happens, server may notify this client connection by setting
// the status to special values, for example: kill or graceful shutdown.
// The client connection would detect the events when it fails to change status
// by CAS operation, it would then take some actions accordingly.
for {
	if !atomic.CompareAndSwapInt32(&cc.status, connStatusDispatching, connStatusReading) ||
		// The judge below will not be hit by all means,
		// But keep it stayed as a reminder and for the code reference for connStatusWaitShutdown.
		atomic.LoadInt32(&cc.status) == connStatusWaitShutdown {
		return
	}
	...
}
```
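The CAS on cc.status is what lets another goroutine (for example, one handling a KILL or a graceful shutdown) flip the connection into a special state and have the Run loop notice it on its next transition. Below is a minimal sketch of that pattern; the status constants and the direction of the transition are illustrative, not TiDB's actual values.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const (
	statusReading      int32 = iota // waiting for the next client packet
	statusDispatching               // executing the current command
	statusWaitShutdown              // asked to stop by another goroutine
)

type conn struct{ status int32 }

// step tries to move reading -> dispatching; a failed CAS means some other
// goroutine has already moved the connection into a special state such as shutdown.
func (c *conn) step() bool {
	return atomic.CompareAndSwapInt32(&c.status, statusReading, statusDispatching)
}

func main() {
	c := &conn{status: statusReading}
	// Another goroutine (e.g. a KILL handler) forces the status to wait-shutdown.
	atomic.StoreInt32(&c.status, statusWaitShutdown)
	fmt.Println(c.step()) // false: the Run loop would return and close the connection
}
```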
```go
// dispatch handles client request based on command which is the first byte of the data.
// It also gets a token from server which is used to limit the concurrently handling clients.
// The most frequently used command is ComQuery.
func (cc *clientConn) dispatch(ctx context.Context, data []byte) error
```
The snippets below walk through the sequential logic inside dispatch.
```go
defer func() {
	// reset killed for each request
	atomic.StoreUint32(&cc.ctx.GetSessionVars().Killed, 0)
}()
t := time.Now()
if (cc.ctx.Status() & mysql.ServerStatusInTrans) > 0 {
	connIdleDurationHistogramInTxn.Observe(t.Sub(cc.lastActive).Seconds())
} else {
	connIdleDurationHistogramNotInTxn.Observe(t.Sub(cc.lastActive).Seconds())
}
```
```go
vars := cc.ctx.GetSessionVars()
// reset killed for each request
atomic.StoreUint32(&vars.Killed, 0)
if cmd < mysql.ComEnd {
	cc.ctx.SetCommandValue(cmd)
}
```
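Killed is an atomically accessed flag on the session variables: a KILL issued from another connection stores a non-zero value, the executing statement polls it, and dispatch resets it at the start of every request (and again via the defer above). A minimal sketch of that signalling pattern follows; the names and the polling loop are illustrative, not TiDB's actual code.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type sessionVars struct {
	Killed uint32 // set to 1 when another connection issues KILL
}

// runStatement simulates an executor loop that periodically checks the flag.
func runStatement(vars *sessionVars) error {
	for i := 0; i < 1000; i++ {
		if atomic.LoadUint32(&vars.Killed) == 1 {
			return fmt.Errorf("query interrupted")
		}
		// ... do a chunk of work ...
	}
	return nil
}

func main() {
	vars := &sessionVars{}
	atomic.StoreUint32(&vars.Killed, 0) // dispatch resets the flag for each request
	atomic.StoreUint32(&vars.Killed, 1) // a concurrent KILL would do this from another goroutine
	fmt.Println(runStatement(vars))     // query interrupted
}
```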
```go
switch cmd {
case mysql.ComSleep:
	// TODO: According to mysql document, this command is supposed to be used only internally.
	// So it's just a temp fix, not sure if it's done right.
	// Investigate this command and write test case later.
	return nil
case mysql.ComQuit:
	return io.EOF
case mysql.ComInitDB:
	if err := cc.useDB(ctx, dataStr); err != nil {
		return err
	}
	return cc.writeOK(ctx)
case mysql.ComQuery: // Most frequently used command.
	// For issue 1989
	// Input payload may end with byte '\0', we didn't find related mysql document about it, but mysql
	// implementation accept that case. So trim the last '\0' here as if the payload an EOF string.
	// See http://dev.mysql.com/doc/internals/en/com-query.html
	if len(data) > 0 && data[len(data)-1] == 0 {
		data = data[:len(data)-1]
		dataStr = string(hack.String(data))
	}
	return cc.handleQuery(ctx, dataStr)
...
}
```
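In the MySQL protocol, the first byte of the packet payload is the command and the remaining bytes are its argument; for ComQuery the argument is the SQL text itself. A small sketch of splitting a payload this way is shown below. The command byte values are the ones defined by the MySQL protocol, while the splitCommand helper is illustrative and not part of TiDB.

```go
package main

import "fmt"

// A few MySQL command bytes (values defined by the MySQL protocol).
const (
	comQuit   byte = 0x01
	comInitDB byte = 0x02
	comQuery  byte = 0x03
)

// splitCommand separates the command byte from its argument, as dispatch does.
func splitCommand(data []byte) (cmd byte, arg string) {
	cmd = data[0]
	data = data[1:]
	// ComQuery payloads may end with a trailing '\0'; trim it like conn.go does.
	if cmd == comQuery && len(data) > 0 && data[len(data)-1] == 0 {
		data = data[:len(data)-1]
	}
	return cmd, string(data)
}

func main() {
	cmd, arg := splitCommand(append([]byte{comQuery}, "select 1\x00"...))
	fmt.Printf("cmd=%#x sql=%q\n", cmd, arg) // cmd=0x3 sql="select 1"
}
```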
```go
// handleQuery executes the sql query string and writes result set or result ok to the client.
// As the execution time of this function represents the performance of TiDB, we do time log and metrics here.
// There is a special query `load data` that does not return result, which is handled differently.
// Query `load stats` does not return result either.
func (cc *clientConn) handleQuery(ctx context.Context, sql string) (err error)
```
```go
var pointPlans []plannercore.Plan
if len(stmts) > 1 {
	// The client gets to choose if it allows multi-statements, and
	// probably defaults OFF. This helps prevent against SQL injection attacks
	// by early terminating the first statement, and then running an entirely
	// new statement.
	capabilities := cc.ctx.GetSessionVars().ClientCapability
	if capabilities&mysql.ClientMultiStatements < 1 {
		// The client does not have multi-statement enabled. We now need to determine
		// how to handle an unsafe situation based on the multiStmt sysvar.
		switch cc.ctx.GetSessionVars().MultiStatementMode {
		case variable.OffInt:
			err = errMultiStatementDisabled
			return err
		case variable.OnInt:
			// multi statement is fully permitted, do nothing
		default:
			warn := stmtctx.SQLWarn{Level: stmtctx.WarnLevelWarning, Err: errMultiStatementDisabled}
			parserWarns = append(parserWarns, warn)
		}
	}

	// Only pre-build point plans for multi-statement query
	pointPlans, err = cc.prefetchPointPlanKeys(ctx, stmts)
	if err != nil {
		return err
	}
}
```
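ClientCapability is a bit mask negotiated during the handshake, so whether multi-statements are allowed boils down to a single bitwise AND against the CLIENT_MULTI_STATEMENTS flag. Here is a minimal sketch of that check; the flag value (1 << 16) is the one defined by the MySQL protocol, while the function and variable names are illustrative.

```go
package main

import "fmt"

// clientMultiStatements is the CLIENT_MULTI_STATEMENTS capability flag (1 << 16)
// from the MySQL protocol.
const clientMultiStatements uint32 = 0x00010000

func multiStatementsAllowed(capabilities uint32) bool {
	return capabilities&clientMultiStatements != 0
}

func main() {
	caps := uint32(0x000aa285) // some capability mask sent by a client
	fmt.Println(multiStatementsAllowed(caps))                         // false
	fmt.Println(multiStatementsAllowed(caps | clientMultiStatements)) // true
}
```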
```go
for i, stmt := range stmts {
	if len(pointPlans) > 0 {
		// Save the point plan in Session, so we don't need to build the point plan again.
		cc.ctx.SetValue(plannercore.PointPlanKey, plannercore.PointPlanVal{Plan: pointPlans[i]})
	}
	retryable, err = cc.handleStmt(ctx, stmt, parserWarns, i == len(stmts)-1)
	if err != nil {
		if !retryable || !errors.ErrorEqual(err, storeerr.ErrTiFlashServerTimeout) {
			break
		}
		_, allowTiFlashFallback := cc.ctx.GetSessionVars().AllowFallbackToTiKV[kv.TiFlash]
		if !allowTiFlashFallback {
			break
		}
		// When the TiFlash server seems down, we append a warning to remind the user to check the status of the TiFlash
		// server and fallback to TiKV.
		warns := append(parserWarns, stmtctx.SQLWarn{Level: stmtctx.WarnLevelError, Err: err})
		delete(cc.ctx.GetSessionVars().IsolationReadEngines, kv.TiFlash)
		_, err = cc.handleStmt(ctx, stmt, warns, i == len(stmts)-1)
		cc.ctx.GetSessionVars().IsolationReadEngines[kv.TiFlash] = struct{}{}
		if err != nil {
			break
		}
	}
}
```
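The TiFlash branch above is a retry-with-degraded-engine pattern: if the statement is retryable and failed with a TiFlash timeout, the connection temporarily removes TiFlash from the set of isolation read engines, re-runs the statement against TiKV, and then restores the set. A minimal, self-contained sketch of that pattern follows; the engine names and the run callback are illustrative, not TiDB's actual types.

```go
package main

import (
	"errors"
	"fmt"
)

var errEngineTimeout = errors.New("tiflash server timeout")

// runWithFallback tries the statement once; on an engine timeout it removes the
// failing engine from the read-engine set, retries, and restores the set afterwards.
func runWithFallback(engines map[string]struct{}, run func(map[string]struct{}) error) error {
	err := run(engines)
	if err == nil || !errors.Is(err, errEngineTimeout) {
		return err
	}
	delete(engines, "tiflash")
	defer func() { engines["tiflash"] = struct{}{} }() // restore for later statements
	return run(engines)
}

func main() {
	engines := map[string]struct{}{"tikv": {}, "tiflash": {}}
	err := runWithFallback(engines, func(e map[string]struct{}) error {
		if _, ok := e["tiflash"]; ok {
			return errEngineTimeout // pretend TiFlash is down
		}
		return nil // succeeded on TiKV
	})
	fmt.Println(err) // <nil>
}
```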
Back in the loop over stmts: if a prefetched point plan already exists, we only need to set it as a value on the session context; the plan does not have to be built again.
cc.handleStmt(ctx, stmt, parserWarns, i == len(stmts)-1) is the core of the core: this is where the abstract syntax tree is processed, covering logical optimization, physical optimization, the executor, interaction with TiKV, and so on.
```go
// The first return value indicates whether the call of handleStmt has no side effect and can be retried.
// Currently, the first return value is used to fall back to TiKV when TiFlash is down.
func (cc *clientConn) handleStmt(ctx context.Context, stmt ast.StmtNode, warns []stmtctx.SQLWarn, lastStmt bool) (bool, error)
```
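Putting the stack trace at the top back together, handleStmt is where the server layer hands off to the SQL layer: TiDBContext.ExecuteStmt wraps session.ExecuteStmt, which compiles the statement via Compiler.Compile, which in turn calls planner.Optimize to perform logical and physical optimization before an executor runs the plan. The following is a rough, heavily simplified sketch of that hand-off chain; all types and signatures here are illustrative stand-ins, not TiDB's real interfaces.

```go
package main

import (
	"context"
	"fmt"
)

// stmtNode stands in for ast.StmtNode; plan stands in for the optimized physical plan.
type stmtNode string
type plan string

// optimize stands in for planner.Optimize: logical + physical optimization.
func optimize(_ context.Context, stmt stmtNode) plan {
	return plan("PhysicalPlan(" + string(stmt) + ")")
}

// compile stands in for (*Compiler).Compile: AST in, executable plan out.
func compile(ctx context.Context, stmt stmtNode) plan {
	return optimize(ctx, stmt)
}

// executeStmt stands in for (*session).ExecuteStmt: compile, then run the executor.
func executeStmt(ctx context.Context, stmt stmtNode) string {
	p := compile(ctx, stmt)
	return "rows produced by executor running " + string(p)
}

// handleStmt stands in for (*clientConn).handleStmt: call into the session and
// write the result back to the client.
func handleStmt(ctx context.Context, stmt stmtNode) {
	fmt.Println(executeStmt(ctx, stmt))
}

func main() {
	handleStmt(context.Background(), stmtNode("SELECT 1"))
}
```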