From 15c4c8cd167f200431364d8af7ec692453e09d82 Mon Sep 17 00:00:00 2001
From: wjHuang
Date: Sun, 28 Apr 2024 17:16:27 +0800
Subject: [PATCH] *: add metadata lock when using the plan cache (#51897)

close pingcap/tidb#51407
---
 pkg/ddl/tests/metadatalock/BUILD.bazel |  3 +-
 pkg/ddl/tests/metadatalock/mdl_test.go | 98 +++++++++++++++++++
 pkg/domain/plan_replayer_dump.go       |  1 +
 pkg/executor/executor.go               |  1 +
 pkg/executor/prepared.go               |  5 +-
 pkg/meta/meta.go                       |  2 +
 pkg/parser/model/model.go              |  3 +
 pkg/planner/core/plan_cache.go         | 36 ++++++-
 pkg/planner/core/plan_cache_utils.go   | 60 +++++++++++-
 pkg/planner/core/preprocess.go         |  7 +-
 .../core/tests/prepare/prepare_test.go | 17 ++--
 pkg/server/driver_tidb.go              |  2 +-
 pkg/session/session.go                 |  2 +-
 pkg/sessionctx/stmtctx/stmtctx.go      |  3 +
 14 files changed, 219 insertions(+), 21 deletions(-)

diff --git a/pkg/ddl/tests/metadatalock/BUILD.bazel b/pkg/ddl/tests/metadatalock/BUILD.bazel
index 1aa96403e0207..0314bcf91fd6d 100644
--- a/pkg/ddl/tests/metadatalock/BUILD.bazel
+++ b/pkg/ddl/tests/metadatalock/BUILD.bazel
@@ -8,10 +8,11 @@ go_test(
         "mdl_test.go",
     ],
     flaky = True,
-    shard_count = 34,
+    shard_count = 36,
    deps = [
        "//pkg/config",
        "//pkg/ddl",
+        "//pkg/ddl/ingest/testutil",
        "//pkg/errno",
        "//pkg/server",
        "//pkg/testkit",
diff --git a/pkg/ddl/tests/metadatalock/mdl_test.go b/pkg/ddl/tests/metadatalock/mdl_test.go
index 667e8b055284c..467affc1b974d 100644
--- a/pkg/ddl/tests/metadatalock/mdl_test.go
+++ b/pkg/ddl/tests/metadatalock/mdl_test.go
@@ -21,6 +21,7 @@ import (
 	"time"
 
 	"github.com/pingcap/failpoint"
+	ingesttestutil "github.com/pingcap/tidb/pkg/ddl/ingest/testutil"
 	mysql "github.com/pingcap/tidb/pkg/errno"
 	"github.com/pingcap/tidb/pkg/server"
 	"github.com/pingcap/tidb/pkg/testkit"
@@ -895,6 +896,103 @@ func TestMDLPreparePlanCacheInvalid(t *testing.T) {
 	tk.MustQuery(`execute stmt_test_1 using @a;`).Check(testkit.Rows("1 ", "2 ", "3 ", "4 "))
 }
 
+func TestMDLPreparePlanCacheExecute(t *testing.T) {
+	store, dom := testkit.CreateMockStoreAndDomain(t)
+	defer ingesttestutil.InjectMockBackendMgr(t, store)()
+
+	sv := server.CreateMockServer(t, store)
+
+	sv.SetDomain(dom)
+	dom.InfoSyncer().SetSessionManager(sv)
+	defer sv.Close()
+
+	conn1 := server.CreateMockConn(t, sv)
+	tk := testkit.NewTestKitWithSession(t, store, conn1.Context().Session)
+	conn2 := server.CreateMockConn(t, sv)
+	tkDDL := testkit.NewTestKitWithSession(t, store, conn2.Context().Session)
+	tk.MustExec("use test")
+	tk.MustExec("set global tidb_enable_metadata_lock=1")
+	tk.MustExec("create table t(a int);")
+	tk.MustExec("create table t2(a int);")
+	tk.MustExec("insert into t values(1), (2), (3), (4);")
+
+	tk.MustExec(`prepare stmt_test_1 from 'update t set a = ? where a = ?';`)
+	tk.MustExec(`set @a = 1, @b = 3;`)
+	tk.MustExec(`execute stmt_test_1 using @a, @b;`)
+
+	tk.MustExec("begin")
+
+	ch := make(chan struct{})
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		<-ch
+		tkDDL.MustExec("alter table test.t add index idx(a);")
+		wg.Done()
+	}()
+
+	tk.MustQuery("select * from t2")
+	tk.MustExec(`set @a = 2, @b=4;`)
+	tk.MustExec(`execute stmt_test_1 using @a, @b;`)
+	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
+	// The plan comes from the cache; the metadata lock should still be added to block the DDL.
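+	// Unblock the DDL goroutine. The ALTER TABLE should block on the MDL held
+	// by this transaction and only proceed after the commit below.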
+	ch <- struct{}{}
+
+	time.Sleep(5 * time.Second)
+
+	tk.MustExec("commit")
+
+	wg.Wait()
+
+	tk.MustExec("admin check table t")
+}
+
+func TestMDLPreparePlanCacheExecute2(t *testing.T) {
+	store, dom := testkit.CreateMockStoreAndDomain(t)
+	defer ingesttestutil.InjectMockBackendMgr(t, store)()
+
+	sv := server.CreateMockServer(t, store)
+
+	sv.SetDomain(dom)
+	dom.InfoSyncer().SetSessionManager(sv)
+	defer sv.Close()
+
+	conn1 := server.CreateMockConn(t, sv)
+	tk := testkit.NewTestKitWithSession(t, store, conn1.Context().Session)
+	conn2 := server.CreateMockConn(t, sv)
+	tkDDL := testkit.NewTestKitWithSession(t, store, conn2.Context().Session)
+	tk.MustExec("use test")
+	tk.MustExec("set global tidb_enable_metadata_lock=1")
+	tk.MustExec("create table t(a int);")
+	tk.MustExec("create table t2(a int);")
+	tk.MustExec("insert into t values(1), (2), (3), (4);")
+
+	tk.MustExec(`prepare stmt_test_1 from 'select * from t where a = ?';`)
+	tk.MustExec(`set @a = 1;`)
+	tk.MustExec(`execute stmt_test_1 using @a;`)
+
+	tk.MustExec("begin")
+	tk.MustQuery("select * from t2")
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		tkDDL.MustExec("alter table test.t add index idx(a);")
+		wg.Done()
+	}()
+
+	wg.Wait()
+
+	tk.MustExec(`set @a = 2;`)
+	tk.MustExec(`execute stmt_test_1 using @a;`)
+	// The plan should not come from the cache because the schema has changed.
+	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
+	tk.MustExec("commit")
+
+	tk.MustExec("admin check table t")
+}
+
 func TestMDLDisable2Enable(t *testing.T) {
 	store, dom := testkit.CreateMockStoreAndDomain(t)
 	sv := server.CreateMockServer(t, store)
diff --git a/pkg/domain/plan_replayer_dump.go b/pkg/domain/plan_replayer_dump.go
index 68e46e5a03f99..ac2aeca66b833 100644
--- a/pkg/domain/plan_replayer_dump.go
+++ b/pkg/domain/plan_replayer_dump.go
@@ -754,6 +754,7 @@ func dumpPlanReplayerExplain(ctx sessionctx.Context, zw *zip.Writer, task *PlanR
 	return err
 }
 
+// extractTableNames extracts table names from the given stmts.
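+// The result is returned as a set of tableNamePair keys.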
 func extractTableNames(ctx context.Context,
 	sctx sessionctx.Context, execStmts []ast.StmtNode, curDB model.CIStr) (map[tableNamePair]struct{}, error) {
 	tableExtractor := &tableNameExtractor{
diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go
index 3bad259bc0724..f72662ad62ad1 100644
--- a/pkg/executor/executor.go
+++ b/pkg/executor/executor.go
@@ -2026,6 +2026,7 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) {
 	vars.SQLKiller.Reset()
 	vars.SQLKiller.ConnID = vars.ConnectionID
 	vars.StmtCtx.TableStats = make(map[int64]any)
+	sc.MDLRelatedTableIDs = make(map[int64]struct{})
 	isAnalyze := false
 	if execStmt, ok := s.(*ast.ExecuteStmt); ok {
diff --git a/pkg/executor/prepared.go b/pkg/executor/prepared.go
index c9ae6edceb707..3b5e0b1daeaef 100644
--- a/pkg/executor/prepared.go
+++ b/pkg/executor/prepared.go
@@ -28,6 +28,7 @@ import (
 	"github.com/pingcap/tidb/pkg/parser/mysql"
 	plannercore "github.com/pingcap/tidb/pkg/planner/core"
 	"github.com/pingcap/tidb/pkg/sessionctx"
+	"github.com/pingcap/tidb/pkg/sessiontxn"
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util"
 	"github.com/pingcap/tidb/pkg/util/chunk"
@@ -119,7 +120,7 @@ func (e *PrepareExec) Next(ctx context.Context, _ *chunk.Chunk) error {
 			return err
 		}
 	}
-	stmt, p, paramCnt, err := plannercore.GeneratePlanCacheStmtWithAST(ctx, e.Ctx(), true, stmt0.Text(), stmt0, nil)
+	stmt, p, paramCnt, err := plannercore.GeneratePlanCacheStmtWithAST(ctx, e.Ctx(), true, stmt0.Text(), stmt0, sessiontxn.GetTxnManager(e.Ctx()).GetTxnInfoSchema())
 	if err != nil {
 		return err
 	}
@@ -208,7 +209,7 @@ func (e *DeallocateExec) Next(context.Context, *chunk.Chunk) error {
 	if e.Ctx().GetSessionVars().EnablePreparedPlanCache {
 		bindSQL, _ := bindinfo.MatchSQLBindingForPlanCache(e.Ctx(), preparedObj.PreparedAst.Stmt, &preparedObj.BindingInfo)
 		cacheKey, err := plannercore.NewPlanCacheKey(vars, preparedObj.StmtText, preparedObj.StmtDB, preparedObj.SchemaVersion,
-			0, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load())
+			0, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load(), preparedObj.RelateVersion)
 		if err != nil {
 			return err
 		}
diff --git a/pkg/meta/meta.go b/pkg/meta/meta.go
index 3eb5f6e11247a..9d176d51f32d9 100644
--- a/pkg/meta/meta.go
+++ b/pkg/meta/meta.go
@@ -942,6 +942,8 @@ func (m *Meta) UpdateTable(dbID int64, tableInfo *model.TableInfo) error {
 		return errors.Trace(err)
 	}
 
+	tableInfo.Revision++
+
 	data, err := json.Marshal(tableInfo)
 	if err != nil {
 		return errors.Trace(err)
diff --git a/pkg/parser/model/model.go b/pkg/parser/model/model.go
index 88056ae9a40ae..2fe93a94945e5 100644
--- a/pkg/parser/model/model.go
+++ b/pkg/parser/model/model.go
@@ -542,6 +542,9 @@ type TableInfo struct {
 
 	TTLInfo *TTLInfo `json:"ttl_info"`
 
+	// Revision is the per-table schema version; it is increased whenever the table's schema changes.
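+	// It is bumped by meta.UpdateTable and compared by the plan cache to detect
+	// per-table schema changes.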
+	Revision uint64 `json:"revision"`
+
 	DBID int64 `json:"-"`
 }
 
diff --git a/pkg/planner/core/plan_cache.go b/pkg/planner/core/plan_cache.go
index d7c793374c881..9f5789f49e600 100644
--- a/pkg/planner/core/plan_cache.go
+++ b/pkg/planner/core/plan_cache.go
@@ -104,8 +104,33 @@ func planCachePreprocess(ctx context.Context, sctx sessionctx.Context, isNonPrep
 		return errors.Trace(err)
 	}
 
-	// step 3: check schema version
-	if stmt.SchemaVersion != is.SchemaMetaVersion() {
+	// step 3: add metadata lock and check each table's schema version
+	schemaNotMatch := false
+	for i := 0; i < len(stmt.dbName); i++ {
+		_, ok := is.TableByID(stmt.tbls[i].Meta().ID)
+		if !ok {
+			tblByName, err := is.TableByName(stmt.dbName[i], stmt.tbls[i].Meta().Name)
+			if err != nil {
+				return plannererrors.ErrSchemaChanged.GenWithStack("Schema change caused error: %s", err.Error())
+			}
+			delete(stmt.RelateVersion, stmt.tbls[i].Meta().ID)
+			stmt.tbls[i] = tblByName
+			stmt.RelateVersion[tblByName.Meta().ID] = tblByName.Meta().Revision
+		}
+		newTbl, err := tryLockMDLAndUpdateSchemaIfNecessary(sctx.GetPlanCtx(), stmt.dbName[i], stmt.tbls[i], is)
+		if err != nil {
+			schemaNotMatch = true
+			continue
+		}
+		if stmt.tbls[i].Meta().Revision != newTbl.Meta().Revision {
+			schemaNotMatch = true
+		}
+		stmt.tbls[i] = newTbl
+		stmt.RelateVersion[newTbl.Meta().ID] = newTbl.Meta().Revision
+	}
+
+	// step 4: check schema version
+	if schemaNotMatch || stmt.SchemaVersion != is.SchemaMetaVersion() {
 		// In order to avoid some correctness issues, we have to clear the
 		// cached plan once the schema version is changed.
 		// Cached plan in prepared struct does NOT have a "cache key" with
@@ -127,7 +152,7 @@ func planCachePreprocess(ctx context.Context, sctx sessionctx.Context, isNonPrep
 		stmt.SchemaVersion = is.SchemaMetaVersion()
 	}
 
-	// step 4: handle expiration
+	// step 5: handle expiration
 	// If the lastUpdateTime less than expiredTimeStamp4PC,
 	// it means other sessions have executed 'admin flush instance plan_cache'.
 	// So we need to clear the current session's plan cache.
@@ -138,6 +163,7 @@ func planCachePreprocess(ctx context.Context, sctx sessionctx.Context, isNonPrep
 		stmt.PointGet.Plan = nil
 		vars.LastUpdateTime4PC = expiredTimeStamp4PC
 	}
+
 	return nil
 }
 
@@ -189,7 +215,7 @@ func GetPlanFromSessionPlanCache(ctx context.Context, sctx sessionctx.Context,
 			latestSchemaVersion = domain.GetDomain(sctx).InfoSchema().SchemaMetaVersion()
 		}
 		if cacheKey, err = NewPlanCacheKey(sctx.GetSessionVars(), stmt.StmtText,
-			stmt.StmtDB, stmt.SchemaVersion, latestSchemaVersion, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load()); err != nil {
+			stmt.StmtDB, stmt.SchemaVersion, latestSchemaVersion, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load(), stmt.RelateVersion); err != nil {
 			return nil, nil, err
 		}
 	}
@@ -334,7 +360,7 @@ func generateNewPlan(ctx context.Context, sctx sessionctx.Context, isNonPrepared
 	if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmtAst.Stmt, sessVars) {
 		delete(sessVars.IsolationReadEngines, kv.TiFlash)
 		if cacheKey, err = NewPlanCacheKey(sessVars, stmt.StmtText, stmt.StmtDB,
-			stmt.SchemaVersion, latestSchemaVersion, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load()); err != nil {
+			stmt.SchemaVersion, latestSchemaVersion, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load(), stmt.RelateVersion); err != nil {
 			return nil, nil, err
 		}
 		sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{}
diff --git a/pkg/planner/core/plan_cache_utils.go b/pkg/planner/core/plan_cache_utils.go
index da551e1465268..8881ad53440b8 100644
--- a/pkg/planner/core/plan_cache_utils.go
+++ b/pkg/planner/core/plan_cache_utils.go
@@ -19,6 +19,7 @@ import (
 	"context"
 	"math"
 	"slices"
+	"sort"
 	"strconv"
 	"time"
 	"unsafe"
@@ -36,6 +37,7 @@ import (
 	"github.com/pingcap/tidb/pkg/planner/util/fixcontrol"
 	"github.com/pingcap/tidb/pkg/sessionctx"
 	"github.com/pingcap/tidb/pkg/sessionctx/variable"
+	"github.com/pingcap/tidb/pkg/table"
 	"github.com/pingcap/tidb/pkg/types"
 	driver "github.com/pingcap/tidb/pkg/types/parser_driver"
 	"github.com/pingcap/tidb/pkg/util/codec"
@@ -44,9 +46,11 @@ import (
 	"github.com/pingcap/tidb/pkg/util/hint"
 	"github.com/pingcap/tidb/pkg/util/intest"
 	"github.com/pingcap/tidb/pkg/util/kvcache"
+	"github.com/pingcap/tidb/pkg/util/logutil"
 	utilpc "github.com/pingcap/tidb/pkg/util/plancache"
 	"github.com/pingcap/tidb/pkg/util/size"
 	atomic2 "go.uber.org/atomic"
+	"go.uber.org/zap"
 )
 
 const (
@@ -180,6 +184,26 @@ func GeneratePlanCacheStmtWithAST(ctx context.Context, sctx sessionctx.Context,
 	features := new(PlanCacheQueryFeatures)
 	paramStmt.Accept(features)
 
+	// Collect information for metadata lock.
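+	// The related table IDs were recorded in StmtCtx.MDLRelatedTableIDs during
+	// preprocessing; see tryLockMDLAndUpdateSchemaIfNecessary.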
+	dbName := make([]model.CIStr, 0, len(vars.StmtCtx.MDLRelatedTableIDs))
+	tbls := make([]table.Table, 0, len(vars.StmtCtx.MDLRelatedTableIDs))
+	relateVersion := make(map[int64]uint64, len(vars.StmtCtx.MDLRelatedTableIDs))
+	for id := range vars.StmtCtx.MDLRelatedTableIDs {
+		tbl, ok := is.TableByID(id)
+		if !ok {
+			logutil.BgLogger().Error("table not found in info schema", zap.Int64("tableID", id))
+			return nil, nil, 0, errors.New("table not found in info schema")
+		}
+		db, ok := is.SchemaByID(tbl.Meta().DBID)
+		if !ok {
+			logutil.BgLogger().Error("database not found in info schema", zap.Int64("dbID", tbl.Meta().DBID))
+			return nil, nil, 0, errors.New("database not found in info schema")
+		}
+		dbName = append(dbName, db.Name)
+		tbls = append(tbls, tbl)
+		relateVersion[id] = tbl.Meta().Revision
+	}
+
 	preparedObj := &PlanCacheStmt{
 		PreparedAst:       prepared,
 		StmtDB:            vars.CurrentDB,
@@ -192,7 +216,10 @@ func GeneratePlanCacheStmtWithAST(ctx context.Context, sctx sessionctx.Context,
 		StmtCacheable:     cacheable,
 		UncacheableReason: reason,
 		QueryFeatures:     features,
+		dbName:            dbName,
+		tbls:              tbls,
 		SchemaVersion:     ret.InfoSchema.SchemaMetaVersion(),
+		RelateVersion:     relateVersion,
 		Params:            extractor.markers,
 	}
 	if err = CheckPreparedPriv(sctx, preparedObj, ret.InfoSchema); err != nil {
@@ -211,6 +238,7 @@ type planCacheKey struct {
 	connID        uint64
 	stmtText      string
 	schemaVersion int64
+	tblVersionMap map[int64]uint64
 
 	// Only be set in rc or for update read and leave it default otherwise.
 	// In Rc or ForUpdateRead, we should check whether the information schema has been changed when using plan cache.
@@ -232,6 +260,23 @@ type planCacheKey struct {
 	hash []byte
 }
 
+func hashInt64Uint64Map(b []byte, m map[int64]uint64) []byte {
+	keys := make([]int64, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Slice(keys, func(i, j int) bool {
+		return keys[i] < keys[j]
+	})
+
+	for _, k := range keys {
+		v := m[k]
+		b = codec.EncodeInt(b, k)
+		b = codec.EncodeUint(b, v)
+	}
+	return b
+}
+
 // Hash implements Key interface.
 func (key *planCacheKey) Hash() []byte {
 	if len(key.hash) == 0 {
@@ -242,6 +287,7 @@ func (key *planCacheKey) Hash() []byte {
 		key.hash = codec.EncodeInt(key.hash, int64(key.connID))
 		key.hash = append(key.hash, hack.Slice(key.stmtText)...)
 		key.hash = codec.EncodeInt(key.hash, key.schemaVersion)
+		key.hash = hashInt64Uint64Map(key.hash, key.tblVersionMap)
 		key.hash = codec.EncodeInt(key.hash, key.lastUpdatedSchemaVersion)
 		key.hash = codec.EncodeInt(key.hash, int64(key.sqlMode))
 		key.hash = codec.EncodeInt(key.hash, int64(key.timezoneOffset))
@@ -301,8 +347,7 @@ func SetPstmtIDSchemaVersion(key kvcache.Key, stmtText string, schemaVersion int
 // NewPlanCacheKey creates a new planCacheKey object.
 // Note: lastUpdatedSchemaVersion will only be set in the case of rc or for update read in order to
 // differentiate the cache key. In other cases, it will be 0.
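+// relatedSchemaVersion maps each related table ID to its schema revision; it is
+// hashed into the key so that a per-table schema change yields a different key.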
-func NewPlanCacheKey(sessionVars *variable.SessionVars, stmtText, stmtDB string, schemaVersion int64,
-	lastUpdatedSchemaVersion int64, bindSQL string, exprBlacklistTS int64) (kvcache.Key, error) {
+func NewPlanCacheKey(sessionVars *variable.SessionVars, stmtText, stmtDB string, schemaVersion, lastUpdatedSchemaVersion int64, bindSQL string, exprBlacklistTS int64, relatedSchemaVersion map[int64]uint64) (kvcache.Key, error) {
 	if stmtText == "" {
 		return nil, errors.New("no statement text")
 	}
@@ -323,6 +368,7 @@ func NewPlanCacheKey(sessionVars *variable.SessionVars, stmtText, stmtDB string,
 		connID:        sessionVars.ConnectionID,
 		stmtText:      stmtText,
 		schemaVersion: schemaVersion,
+		tblVersionMap: make(map[int64]uint64),
 		lastUpdatedSchemaVersion: lastUpdatedSchemaVersion,
 		sqlMode:        sessionVars.SQLMode,
 		timezoneOffset: timezoneOffset,
@@ -338,6 +384,9 @@ func NewPlanCacheKey(sessionVars *variable.SessionVars, stmtText, stmtDB string,
 	for k, v := range sessionVars.IsolationReadEngines {
 		key.isolationReadEngines[k] = v
 	}
+	for k, v := range relatedSchemaVersion {
+		key.tblVersionMap[k] = v
+	}
 	return key, nil
 }
 
@@ -464,6 +513,9 @@ type PlanCacheStmt struct {
 	// below fields are for PointGet short path
 	SchemaVersion int64
 
+	// RelateVersion stores the schema revision of each related table, since each table's schema can be updated separately within a transaction.
+	RelateVersion map[int64]uint64
+
 	StmtCacheable     bool   // Whether this stmt is cacheable.
 	UncacheableReason string // Why this stmt is uncacheable.
 	QueryFeatures     *PlanCacheQueryFeatures
@@ -483,6 +535,10 @@ type PlanCacheStmt struct {
 	// NormalizedSQL4PC: select * from `test` . `t` where `a` > ? and `b` < ? --> schema name is added,
 	// StmtText: select * from t where a>1 and b just format the original query;
 	StmtText string
+
+	// dbName and tbls are used to add the metadata lock.
+	dbName []model.CIStr
+	tbls   []table.Table
 }
 
 // GetPreparedStmt extract the prepared statement from the execute statement.
diff --git a/pkg/planner/core/preprocess.go b/pkg/planner/core/preprocess.go
index f16c0bae41332..513f386238915 100644
--- a/pkg/planner/core/preprocess.go
+++ b/pkg/planner/core/preprocess.go
@@ -1818,6 +1818,12 @@ func tryLockMDLAndUpdateSchemaIfNecessary(sctx PlanContext, dbName model.CIStr,
 		return tbl, nil
 	}
 	tableInfo := tbl.Meta()
+	var err error
+	defer func() {
+		if err == nil && !skipLock {
+			sctx.GetSessionVars().StmtCtx.MDLRelatedTableIDs[tbl.Meta().ID] = struct{}{}
+		}
+	}()
 	if _, ok := sctx.GetSessionVars().GetRelatedTableForMDL().Load(tableInfo.ID); !ok {
 		if se, ok := is.(*infoschema.SessionExtendedInfoSchema); ok && skipLock && se.MdlTables != nil {
 			if _, ok := se.MdlTables.TableByID(tableInfo.ID); ok {
@@ -1836,7 +1842,6 @@ func tryLockMDLAndUpdateSchemaIfNecessary(sctx PlanContext, dbName model.CIStr,
 		dom := domain.GetDomain(sctx)
 		domainSchema := dom.InfoSchema()
 		domainSchemaVer := domainSchema.SchemaMetaVersion()
-		var err error
 		tbl, err = domainSchema.TableByName(dbName, tableInfo.Name)
 		if err != nil {
 			if !skipLock {
diff --git a/pkg/planner/core/tests/prepare/prepare_test.go b/pkg/planner/core/tests/prepare/prepare_test.go
index 2ba67bb277b1c..9dbcf78e6f502 100644
--- a/pkg/planner/core/tests/prepare/prepare_test.go
+++ b/pkg/planner/core/tests/prepare/prepare_test.go
@@ -1449,14 +1449,15 @@ func verifyCache(ctx context.Context, t *testing.T, tk1 *testkit.TestKit, tk2 *t
 	tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
 
 	// Change infoSchema version which will make the plan cache invalid in the next execute
-	tk2.MustExec("alter table t1 drop column c")
-	tk1.MustExec("execute s")
-	tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
-	// Now the plan cache will be valid
-	rs, err = tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
-	require.NoError(t, err)
-	require.NoError(t, rs.Close())
-	tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
+	// DDL is blocked by MDL.
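+	// These checks assumed the DDL takes effect immediately; now that the MDL
+	// held by tk1's transaction blocks it, they are disabled.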
+ //tk2.MustExec("alter table t1 drop column c") + //tk1.MustExec("execute s") + //tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + //// Now the plan cache will be valid + //rs, err = tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test()) + //require.NoError(t, err) + //require.NoError(t, rs.Close()) + //tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) } func TestCacheHitInRc(t *testing.T) { diff --git a/pkg/server/driver_tidb.go b/pkg/server/driver_tidb.go index 653205ca73745..93faa9abd9a99 100644 --- a/pkg/server/driver_tidb.go +++ b/pkg/server/driver_tidb.go @@ -203,7 +203,7 @@ func (ts *TiDBStatement) Close() error { } bindSQL, _ := bindinfo.MatchSQLBindingForPlanCache(ts.ctx, preparedObj.PreparedAst.Stmt, &preparedObj.BindingInfo) cacheKey, err := core.NewPlanCacheKey(ts.ctx.GetSessionVars(), preparedObj.StmtText, preparedObj.StmtDB, - preparedObj.SchemaVersion, 0, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load()) + preparedObj.SchemaVersion, 0, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load(), preparedObj.RelateVersion) if err != nil { return err } diff --git a/pkg/session/session.go b/pkg/session/session.go index 37d81b13ce88d..cf870b62ca725 100644 --- a/pkg/session/session.go +++ b/pkg/session/session.go @@ -301,7 +301,7 @@ func (s *session) cleanRetryInfo() { stmtText, stmtDB = preparedObj.StmtText, preparedObj.StmtDB bindSQL, _ := bindinfo.MatchSQLBindingForPlanCache(s.pctx, preparedObj.PreparedAst.Stmt, &preparedObj.BindingInfo) cacheKey, err = plannercore.NewPlanCacheKey(s.sessionVars, stmtText, stmtDB, preparedObj.SchemaVersion, - 0, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load()) + 0, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load(), preparedObj.RelateVersion) if err != nil { logutil.Logger(s.currentCtx).Warn("clean cached plan failed", zap.Error(err)) return diff --git a/pkg/sessionctx/stmtctx/stmtctx.go b/pkg/sessionctx/stmtctx/stmtctx.go index 35ed290715ebe..706d902b90b3a 100644 --- a/pkg/sessionctx/stmtctx/stmtctx.go +++ b/pkg/sessionctx/stmtctx/stmtctx.go @@ -434,6 +434,9 @@ type StatementContext struct { value *uint64 eval func() (uint64, error) } + + // MDLRelatedTableIDs is used to store the table IDs that are related to the current MDL lock. + MDLRelatedTableIDs map[int64]struct{} } var defaultErrLevels = func() (l errctx.LevelMap) {