From 1ce199d865e64647c85de5465ec11d20fb49462b Mon Sep 17 00:00:00 2001 From: Ping Yu Date: Thu, 24 Aug 2023 18:11:33 +0800 Subject: [PATCH] wip Signed-off-by: Ping Yu --- Dockerfile | 5 +- config.yaml | 3 +- .../concepts/explore-tikv-features/api-v2.md | 133 ++ .../backup-restore-cn.md | 182 +++ .../explore-tikv-features/backup-restore.md | 220 +++ .../7.1/concepts/explore-tikv-features/cas.md | 103 ++ .../explore-tikv-features/cdc/cdc-cn.md | 375 +++++ .../concepts/explore-tikv-features/cdc/cdc.md | 400 +++++ .../distributed-transaction.md | 173 +++ .../explore-tikv-features/fault-tolerance.md | 234 +++ .../explore-tikv-features/overview.md | 22 + .../replication-and-rebalancing.md | 177 +++ .../7.1/concepts/explore-tikv-features/ttl.md | 108 ++ content/docs/7.1/concepts/overview.md | 39 + .../docs/7.1/concepts/tikv-in-5-minutes.md | 284 ++++ content/docs/7.1/concepts/whats-next.md | 31 + .../docs/7.1/deploy/configure/coprocessor.md | 11 + content/docs/7.1/deploy/configure/grpc.md | 11 + .../docs/7.1/deploy/configure/introduction.md | 33 + content/docs/7.1/deploy/configure/limit.md | 77 + .../7.1/deploy/configure/pd-command-line.md | 109 ++ .../deploy/configure/pd-configuration-file.md | 385 +++++ .../docs/7.1/deploy/configure/raftstore.md | 11 + .../docs/7.1/deploy/configure/region-merge.md | 38 + content/docs/7.1/deploy/configure/rocksdb.md | 33 + content/docs/7.1/deploy/configure/security.md | 168 +++ content/docs/7.1/deploy/configure/storage.md | 12 + .../7.1/deploy/configure/tikv-command-line.md | 76 + .../configure/tikv-configuration-file.md | 1326 +++++++++++++++++ content/docs/7.1/deploy/configure/titan.md | 59 + content/docs/7.1/deploy/configure/topology.md | 98 ++ content/docs/7.1/deploy/deploy.md | 66 + content/docs/7.1/deploy/install/install.md | 16 + .../docs/7.1/deploy/install/prerequisites.md | 101 ++ content/docs/7.1/deploy/install/production.md | 167 +++ content/docs/7.1/deploy/install/test.md | 216 +++ content/docs/7.1/deploy/install/verify.md | 75 + content/docs/7.1/deploy/monitor/alert.md | 861 +++++++++++ content/docs/7.1/deploy/monitor/api.md | 66 + content/docs/7.1/deploy/monitor/deploy.md | 225 +++ content/docs/7.1/deploy/monitor/framework.md | 28 + content/docs/7.1/deploy/monitor/grafana.md | 47 + .../docs/7.1/deploy/monitor/key-metrics.md | 117 ++ content/docs/7.1/deploy/monitor/monitor.md | 18 + content/docs/7.1/deploy/operate/maintain.md | 211 +++ content/docs/7.1/deploy/operate/operate.md | 15 + content/docs/7.1/deploy/operate/scale.md | 175 +++ content/docs/7.1/deploy/operate/upgrade.md | 154 ++ .../7.1/deploy/performance/instructions.md | 194 +++ .../docs/7.1/deploy/performance/overview.md | 74 + .../7.1/deploy/performance/performance.md | 14 + content/docs/7.1/develop/clients/cpp.md | 17 + content/docs/7.1/develop/clients/go.md | 243 +++ .../docs/7.1/develop/clients/introduction.md | 33 + content/docs/7.1/develop/clients/java.md | 145 ++ content/docs/7.1/develop/clients/python.md | 17 + content/docs/7.1/develop/clients/rust.md | 147 ++ content/docs/7.1/develop/develop.md | 32 + content/docs/7.1/develop/rawkv/cas.md | 79 + content/docs/7.1/develop/rawkv/checksum.md | 74 + .../docs/7.1/develop/rawkv/get-put-delete.md | 86 ++ .../docs/7.1/develop/rawkv/introduction.md | 19 + content/docs/7.1/develop/rawkv/scan.md | 103 ++ content/docs/7.1/develop/rawkv/ttl.md | 129 ++ content/docs/7.1/new-features/overview.md | 44 + .../docs/7.1/reference/CLI/introduction.md | 15 + content/docs/7.1/reference/CLI/pd-ctl.md | 922 ++++++++++++ 
content/docs/7.1/reference/CLI/pd-recover.md | 123 ++ content/docs/7.1/reference/CLI/tikv-ctl.md | 399 +++++ content/docs/7.1/reference/TiUP.md | 13 + .../reference/architecture/introduction.md | 16 + .../7.1/reference/architecture/overview.md | 61 + .../7.1/reference/architecture/scheduling.md | 144 ++ .../7.1/reference/architecture/storage.md | 115 ++ .../7.1/reference/architecture/terminology.md | 43 + content/docs/7.1/reference/faq.md | 155 ++ content/docs/7.1/reference/introduction.md | 16 + content/docs/7.1/reference/query-layers.md | 18 + 78 files changed, 10979 insertions(+), 5 deletions(-) create mode 100644 content/docs/7.1/concepts/explore-tikv-features/api-v2.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/backup-restore-cn.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/backup-restore.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/cas.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/cdc/cdc-cn.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/cdc/cdc.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/distributed-transaction.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/fault-tolerance.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/overview.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/replication-and-rebalancing.md create mode 100644 content/docs/7.1/concepts/explore-tikv-features/ttl.md create mode 100644 content/docs/7.1/concepts/overview.md create mode 100644 content/docs/7.1/concepts/tikv-in-5-minutes.md create mode 100644 content/docs/7.1/concepts/whats-next.md create mode 100644 content/docs/7.1/deploy/configure/coprocessor.md create mode 100644 content/docs/7.1/deploy/configure/grpc.md create mode 100644 content/docs/7.1/deploy/configure/introduction.md create mode 100644 content/docs/7.1/deploy/configure/limit.md create mode 100644 content/docs/7.1/deploy/configure/pd-command-line.md create mode 100644 content/docs/7.1/deploy/configure/pd-configuration-file.md create mode 100644 content/docs/7.1/deploy/configure/raftstore.md create mode 100644 content/docs/7.1/deploy/configure/region-merge.md create mode 100644 content/docs/7.1/deploy/configure/rocksdb.md create mode 100644 content/docs/7.1/deploy/configure/security.md create mode 100644 content/docs/7.1/deploy/configure/storage.md create mode 100644 content/docs/7.1/deploy/configure/tikv-command-line.md create mode 100644 content/docs/7.1/deploy/configure/tikv-configuration-file.md create mode 100644 content/docs/7.1/deploy/configure/titan.md create mode 100644 content/docs/7.1/deploy/configure/topology.md create mode 100644 content/docs/7.1/deploy/deploy.md create mode 100644 content/docs/7.1/deploy/install/install.md create mode 100644 content/docs/7.1/deploy/install/prerequisites.md create mode 100644 content/docs/7.1/deploy/install/production.md create mode 100644 content/docs/7.1/deploy/install/test.md create mode 100644 content/docs/7.1/deploy/install/verify.md create mode 100644 content/docs/7.1/deploy/monitor/alert.md create mode 100644 content/docs/7.1/deploy/monitor/api.md create mode 100644 content/docs/7.1/deploy/monitor/deploy.md create mode 100644 content/docs/7.1/deploy/monitor/framework.md create mode 100644 content/docs/7.1/deploy/monitor/grafana.md create mode 100644 content/docs/7.1/deploy/monitor/key-metrics.md create mode 100644 content/docs/7.1/deploy/monitor/monitor.md create mode 100644 
content/docs/7.1/deploy/operate/maintain.md create mode 100644 content/docs/7.1/deploy/operate/operate.md create mode 100644 content/docs/7.1/deploy/operate/scale.md create mode 100644 content/docs/7.1/deploy/operate/upgrade.md create mode 100644 content/docs/7.1/deploy/performance/instructions.md create mode 100644 content/docs/7.1/deploy/performance/overview.md create mode 100644 content/docs/7.1/deploy/performance/performance.md create mode 100644 content/docs/7.1/develop/clients/cpp.md create mode 100644 content/docs/7.1/develop/clients/go.md create mode 100644 content/docs/7.1/develop/clients/introduction.md create mode 100644 content/docs/7.1/develop/clients/java.md create mode 100644 content/docs/7.1/develop/clients/python.md create mode 100644 content/docs/7.1/develop/clients/rust.md create mode 100644 content/docs/7.1/develop/develop.md create mode 100644 content/docs/7.1/develop/rawkv/cas.md create mode 100644 content/docs/7.1/develop/rawkv/checksum.md create mode 100644 content/docs/7.1/develop/rawkv/get-put-delete.md create mode 100644 content/docs/7.1/develop/rawkv/introduction.md create mode 100644 content/docs/7.1/develop/rawkv/scan.md create mode 100644 content/docs/7.1/develop/rawkv/ttl.md create mode 100644 content/docs/7.1/new-features/overview.md create mode 100644 content/docs/7.1/reference/CLI/introduction.md create mode 100644 content/docs/7.1/reference/CLI/pd-ctl.md create mode 100644 content/docs/7.1/reference/CLI/pd-recover.md create mode 100644 content/docs/7.1/reference/CLI/tikv-ctl.md create mode 100644 content/docs/7.1/reference/TiUP.md create mode 100644 content/docs/7.1/reference/architecture/introduction.md create mode 100644 content/docs/7.1/reference/architecture/overview.md create mode 100644 content/docs/7.1/reference/architecture/scheduling.md create mode 100644 content/docs/7.1/reference/architecture/storage.md create mode 100644 content/docs/7.1/reference/architecture/terminology.md create mode 100644 content/docs/7.1/reference/faq.md create mode 100644 content/docs/7.1/reference/introduction.md create mode 100644 content/docs/7.1/reference/query-layers.md diff --git a/Dockerfile b/Dockerfile index 9cdd695c..467df4d7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,9 +2,6 @@ FROM alpine:latest RUN apk update RUN apk add hugo yarn make git -RUN adduser builder -D -USER builder - RUN mkdir -p /home/builder/build WORKDIR /home/builder/build COPY package.json /home/builder/build @@ -12,5 +9,5 @@ COPY yarn.lock /home/builder/build RUN yarn COPY . /home/builder/build -CMD /bin/sh -c "yarn && make serve-production" +CMD /bin/sh -c "git config --global --add safe.directory /home/builder/build && yarn && make serve-production" EXPOSE 1313 diff --git a/config.yaml b/config.yaml index 24da4e90..1e485409 100644 --- a/config.yaml +++ b/config.yaml @@ -34,9 +34,10 @@ params: favicon: "favicon.png" googleAnalyticsId: "UA-130734531-1" versions: - latest: "6.5" + latest: "7.1" all: - "dev" + - "7.1" - "6.5" - "6.1" - "5.1" diff --git a/content/docs/7.1/concepts/explore-tikv-features/api-v2.md b/content/docs/7.1/concepts/explore-tikv-features/api-v2.md new file mode 100644 index 00000000..6f937d59 --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/api-v2.md @@ -0,0 +1,133 @@ +--- +title: TiKV API v2 +description: What's TiKV API v2 and how to use it +menu: + "7.1": + parent: Features-7.1 + weight: 6 + identifier: TiKV API v2-7.1 +--- + +This page introduces what's TiKV API v2 and how to use it. 
+
+## TiKV API v2
+
+Before TiKV v6.1.0, the RawKV interfaces store raw client data as-is, so they provide only basic key-value read and write capabilities. Besides, due to the different encodings and the lack of isolation, TiDB, TxnKV, and RawKV cannot be used simultaneously in the same TiKV cluster. In this scenario, multiple clusters must be deployed, which increases resource and maintenance costs.
+
+TiKV API v2 provides a new storage format, which includes the following:
+
+- RawKV organizes data as [MVCC], records the update timestamp of every entry, and provides the [CDC] feature based on that timestamp (RawKV [CDC] is an experimental feature provided by another component, see [TiKV-CDC]).
+- Data in TiKV is separated by mode, so TiDB, TxnKV, and RawKV can be used in a single cluster at the same time.
+- A `Key Space` field is reserved to support multi-tenancy in the future.
+
+To use TiKV API v2, add or modify `api-version = 2` and `enable-ttl = true` in the `[storage]` section of the TiKV configuration. See [configuration file](https://docs.pingcap.com/tidb/dev/tikv-configuration-file#api-version-new-in-v610) for details.
+
+Besides, when API v2 is enabled, you need to deploy at least one tidb-server instance to reclaim expired [MVCC] data. To ensure high availability, you can deploy multiple tidb-server instances. Note that these tidb-server instances can also be used as a normal TiDB database.
+
+> Warning
+> - Due to the significant change in the storage format, you can enable or disable API v2 smoothly **only if** the existing TiKV cluster is empty or stores **only** TiDB data. In other scenarios, you must deploy a new cluster and migrate the data using [TiKV-BR].
+> - After API v2 is enabled, you **cannot** downgrade the TiKV cluster to a version earlier than v6.1.0. Otherwise, data corruption might occur.
+
+## Usage Demonstration
+
+### Prerequisites
+
+Before you start, ensure that you have installed TiUP according to [TiKV in 5 Minutes](../../tikv-in-5-minutes).
+
+### Step 1: Configure TiKV to enable API v2
+
+To enable API v2, create a file `tikv.yaml` with the following configuration.
+
+```yaml
+[storage]
+api-version = 2
+enable-ttl = true
+```
+
+### Step 2: Start the TiKV Cluster
+
+```bash
+tiup playground nightly --db 1 --tiflash 0 --pd 1 --kv 1 --kv.config tikv.yaml
+```
+
+### Step 3: Write the code to test API v2
+
+Take the [Go client] as an example. Save the following script to the file `test_api_v2.go`.
+ +```go +package main + +import ( + "context" + "fmt" + + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/tikv/client-go/v2/rawkv" +) + +func main() { + cli, err := rawkv.NewClientWithOpts(context.TODO(), []string{"127.0.0.1:2379"}, + rawkv.WithAPIVersion(kvrpcpb.APIVersion_V2)) + if err != nil { + panic(err) + } + defer cli.Close() + + fmt.Printf("cluster ID: %d\n", cli.ClusterID()) + + key := []byte("Company") + val := []byte("PingCAP") + + // put key into tikv + err = cli.Put(context.TODO(), key, val) + if err != nil { + panic(err) + } + fmt.Printf("Successfully put %s:%s to tikv\n", key, val) + + // get key from tikv + val, err = cli.Get(context.TODO(), key) + if err != nil { + panic(err) + } + fmt.Printf("found val: %s for key: %s\n", val, key) + + // delete key from tikv + err = cli.Delete(context.TODO(), key) + if err != nil { + panic(err) + } + fmt.Printf("key: %s deleted\n", key) + + // get key again from tikv + val, err = cli.Get(context.TODO(), key) + if err != nil { + panic(err) + } + fmt.Printf("found val: %s for key: %s\n", val, key) +} +``` + +### Step 4: Run the code + +```bash +❯ go mod tidy +❯ go run test_api_v2.go +[2022/11/02 21:23:10.507 +08:00] [INFO] [client.go:405] ["[pd] create pd client with endpoints"] [pd-address="[172.16.5.32:32379]"] +[2022/11/02 21:23:10.513 +08:00] [INFO] [base_client.go:378] ["[pd] switch leader"] [new-leader=http://172.16.5.32:32379] [old-leader=] +[2022/11/02 21:23:10.513 +08:00] [INFO] [base_client.go:105] ["[pd] init cluster id"] [cluster-id=7153087019074699621] +[2022/11/02 21:23:10.514 +08:00] [INFO] [client.go:698] ["[pd] tso dispatcher created"] [dc-location=global] +cluster ID: 7153087019074699621 +Successfully put Company:PingCAP to tikv +found val: PingCAP for key: Company +key: Company deleted +found val: for key: Company +[2022/11/02 21:23:10.532 +08:00] [INFO] [client.go:779] ["[pd] stop fetching the pending tso requests due to context canceled"] [dc-location=global] +[2022/11/02 21:23:10.533 +08:00] [INFO] [client.go:716] ["[pd] exit tso dispatcher"] [dc-location=global] +``` + +[MVCC]: https://en.wikipedia.org/wiki/Multiversion_concurrency_control +[CDC]: https://en.wikipedia.org/wiki/Change_data_capture +[TiKV-CDC]: https://github.com/tikv/migration/blob/main/cdc/README.md +[TiKV-BR]: https://github.com/tikv/migration/blob/main/br/README.md +[Go client]: https://github.com/tikv/client-go/wiki/RawKV-Basic diff --git a/content/docs/7.1/concepts/explore-tikv-features/backup-restore-cn.md b/content/docs/7.1/concepts/explore-tikv-features/backup-restore-cn.md new file mode 100644 index 00000000..7235d1b3 --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/backup-restore-cn.md @@ -0,0 +1,182 @@ +--- +title: RawKV BR 使用手册 +description: 如何使用 RawKV BR +menu: + "7.1": + parent: RawKV BR-7.1 + weight: 1 + identifier: RawKV BR CN-7.1 +--- + +本文是 RawKV BR 的使用手册。 + + +**[TiKV Backup & Restore (TiKV-BR)]** 是 TiKV 分布式备份恢复的命令行工具,用于对 TiKV 集群进行数据备份和恢复。 + +## 工作原理 +TiKV-BR 将备份或恢复操作命令下发到各个 TiKV 节点。TiKV 收到命令后执行相应的备份或恢复操作。 在一次备份或恢复中,各个 TiKV 节点都会有一个对应的备份路径,TiKV 备份时产生的备份文件将会保存在该路径下,恢复时也会从该路径读取相应的备份文件。 +{{< figure + src="/img/docs/tikv-br.png" + caption="TiKV-BR 工作原理" + number="1" >}} + +### 安装 + +#### 使用 TiUP 安装 + +`tikv-br` 是 [TiUP] 的一个组件,你可以使用 [TiUP] 来部署 `tikv-br`: +```bash +tiup tikv-br:v1.1.0 <命令> <子命令> +``` +如果是第一次使用,TiUP 会自动下载和安装 `tikv-br` 组件。 + + +#### 手工安装 +你可以从 [GitHub] 下载 `tikv-br` 的最新发行版。 + +### 推荐部署配置 +- 生产环境中,推荐 TiKV-BR 运行在(4 核+/8 GB+)的节点上。操作系统版本要求可参考 [操作系统及平台的要求]。 +- 推荐使用 Amazon s3 存储 或者 SSD 网盘,挂载到 TiKV-BR 
节点和所有 TiKV 节点上,网盘推荐万兆网卡,否则带宽有可能成为备份恢复时的性能瓶颈。 +- TiKV-BR 只支持版本大于 v5.0.0 的 TiKV 集群中 RawKV 模式数据的备份和恢复。 +- 建议为备份和恢复配置足够的资源: + - TiKV-BR、TiKV 节点和备份存储系统需要提供大于备份速度的的网络带宽。当集群特别大的时候,备份和恢复速度上限受限于备份网络的带宽。 + - 备份存储系统还需要提供足够的写入/读取性能(IOPS),否则它有可能成为备份恢复时的性能瓶颈。 + - TiKV 节点需要为备份准备至少额外的两个 CPU core 和高性能的磁盘,否则备份将对集群上运行的业务产生影响。 + +### 最佳实践 +下面是使用 TiKV-BR 进行备份恢复的几种推荐操作: +- 推荐在业务低峰时执行备份操作,这样能最大程度地减少对业务的影响。 +- TiKV-BR 支持在不同拓扑的集群上执行恢复,但恢复期间对在线业务影响很大,建议低峰期或者限速 (rate-limit) 执行恢复。 +- TiKV-BR 备份最好串行执行。不同备份任务并行会导致备份性能降低,同时也会影响在线业务。 +- TiKV-BR 恢复最好串行执行。不同恢复任务并行会导致 Region 冲突增多,恢复的性能降低。 +- 可以通过指定 `--checksum=true`,在备份、恢复完成后进行一轮数据校验。数据校验将分别计算备份数据与 TiKV 集群中数据的 checksum,并对比二者是否相同。请注意,如果需要进行数据校验,请确保在备份或恢复的全过程,TiKV 集群没有数据变更和 TTL 过期。 +- TiKV-BR 可用于实现 [api-version] 从 V1 到 V2 的集群数据迁移。通过指定 `--dst-api-version V2` 将 `api-version=1` 的 TiKV 集群备份为 V2 格式,然后将备份文件恢复到新的 `api-version=2` TiKV 集群中。 + +### TiKV-BR 命令行描述 +一条 `tikv-br` 命令是由子命令、选项和参数组成的。子命令即不带 `-` 或者 `--` 的字符。选项即以 `-` 或者 `--` 开头的字符。参数即子命令或选项字符后紧跟的、并传递给命令和选项的字符。 +#### 备份集群 Raw 模式数据 +要备份 TiKV 集群中 Raw 模式数据,可使用 `tikv-br backup raw` 命令。该命令的使用帮助可以通过 `tikv-br backup raw --help` 来获取。 +用例:将 TiKV 集群中 Raw 模式数据备份到 s3 `/backup-data/2022-09-16/` 目录中。 +```bash +export AWS_ACCESS_KEY_ID=&{AWS_KEY_ID}; +export AWS_SECRET_ACCESS_KEY=&{AWS_KEY}; +tikv-br backup raw \ + --pd="&{PDIP}:2379" \ + --storage="s3://backup-data/2022-09-16/" \ + --dst-api-version v2 \ + --log-file="/tmp/backupraw.log \ + --gcttl=5m \ + --start="a" \ + --end="z" \ + --format="raw" +``` +命令行各部分的解释如下: +- `backup`:`tikv-br` 的子命令。 +- `raw`:`backup` 的子命令。 +- `-s` 或 `--storage`:备份保存的路径。 +- `"s3://backup-data/2022-09-16/"`:`--storage` 的参数,保存的路径为 S3 的 `/backup-data/2022-09-16/` 目录。 +- `--pd`:`PD` 服务地址。 +- `"${PDIP}:2379"`:`--pd` 的参数。 +- `--dst-api-version`: 指定备份文件的 `api-version`,请见 [TiKV API v2]。 +- `v2`: `--dst-api-version` 的参数,可选参数为 `v1`,`v1ttl`,`v2`(不区分大小写),如果不指定 `dst-api-version` 参数,则备份文件的 `api-version` 与指定 `--pd` 所属的 TiKV 集群 `api-version` 相同。 +- `gcttl`: GC 暂停时长。可用于确保从存量数据备份到 [创建 TiKV-CDC 同步任务] 的这段时间内,增量数据不会被 GC 清除。默认为 5 分钟。 +- `5m`: `gcttl` 的参数,数据格式为`数字 + 时间单位`, 例如 `24h` 表示 24 小时,`60m` 表示 60 分钟。 +- `start`, `end`: 用于指定需要备份的数据区间,为左闭右开区间 `[start, end)`。默认为`["", "")`, 即全部数据。 +- `format`:`start` 和 `end` 的格式,支持 `raw`、[hex] 和 [escaped] 三种格式。 + +备份期间会有进度条在终端中显示,当进度条前进到 100% 时,说明备份已完成。 + +备份完成后,会显示如下所示的信息: +```bash +[2022/09/20 18:01:10.125 +08:00] [INFO] [collector.go:67] ["Raw backup success summary"] [total-ranges=3] [ranges-succeed=3] [ranges-failed=0] [backup-total-regions=3] [total-take=5.050265883s] [backup-ts=436120585518448641] [total-kv=100000] [total-kv-size=108.7MB] [average-speed=21.11MB/s] [backup-data-size(after-compressed)=78.3MB] +``` +以上信息的解释如下: +- `total-ranges`: 备份任务切分成的分片个数, 与 `ranges-succeed` + `ranges-failed` 相等. +- `ranges-succeed`: 成功分片的个数。 +- `ranges-failed`: 失败分片的个数。 +- `backup-total-regions`: 执行备份任务的 TiKV region 个数。 +- `total-take`: 备份任务执行时长。 +- `backup-ts`: 备份时间点,只对 API V2 的 TiKV 集群生效。可以用于 [创建 TiKV-CDC 同步任务] 时的 `start-ts`. 
+- `total-kv`: 备份文件中键值对的个数。 +- `total-kv-size`: 备份文件中键值对的大小。请注意,这是指压缩前的大小。 +- `average-speed`: 备份速率,约等于 `total-kv-size` / `total-take`。 +- `backup-data-size(after-compressed)`: 备份文件的大小。 + +#### 恢复 Raw 模式备份数据 + +要将 Raw 模式备份数据恢复到集群中来,可使用 `tikv-br restore raw` 命令。该命令的使用帮助可以通过 `tikv-br restore raw --help` 来获取。 +用例:将 s3 `/backup-data/2022-09-16/` 路径中的 Raw 模式备份数据恢复到集群中。 +```bash +export AWS_ACCESS_KEY_ID=&{AWS_KEY_ID}; +export AWS_SECRET_ACCESS_KEY=&{AWS_KEY}; +tikv-br restore raw \ + --pd "${PDIP}:2379" \ + --storage "s3://backup-data/2022-09-16/" \ + --log-file restoreraw.log +``` +以上命令中,`--log-file` 选项指定把 `TiKV-BR` 的 log 写到 `restoreraw.log` 文件中。 +恢复期间会有进度条在终端中显示,当进度条前进到 100% 时,说明恢复已完成。 + +恢复完成后,会显示如下所示的信息: +```bash +[2022/09/20 18:02:12.540 +08:00] [INFO] [collector.go:67] ["Raw restore success summary"] [total-ranges=3] [ranges-succeed=3] [ranges-failed=0] [restore-files=3] [total-take=950.460846ms] [restore-data-size(after-compressed)=78.3MB] [total-kv=100000] [total-kv-size=108.7MB] [average-speed=114.4MB/s] +``` +以上信息的解释如下: +- `total-ranges`: 恢复任务切分成的分片个数, 与 `ranges-succeed` + `ranges-failed` 相等. +- `ranges-succeed`: 成功分片的个数。 +- `ranges-failed`: 失败分片的个数。 +- `restore-files`: 执行恢复的文件个数。 +- `total-take`: 恢复时长。 +- `total-kv`: 恢复文件中键值对的个数。 +- `total-kv-size`: 恢复文件中键值对的大小。请注意,这是指压缩前的大小。 +- `average-speed`: 恢复速率,约等于 `total-kv-size` / `total-take`。 +- `restore-data-size(after-compressed)`: 恢复文件的大小。 + + +### 备份文件的数据校验 + +TiKV-BR 可以在 TiKV 集群备份和恢复操作完成后执行 `checksum` 来确保备份文件的完整性和正确性。 checksum 可以通过 `--checksum` 来开启。 + +Checksum 开启时,备份或恢复操作完成后,会使用 [client-go] 的 [checksum] 接口来计算 TiKV 集群中有效数据的 checksum 结果,并与备份文件保存的 checksum 结果进行对比。 + +注意,当 TiKV 集群启用了 [TTL],如果在备份或恢复过程中出现数据 TTL 过期,此时 TiKV 集群的 checksum 结果跟备份文件的 checksum 会不相同,因此无法在此场景中使用 `checksum`。 + +### 备份恢复操作的安全性 + +TiKV-BR 支持在开启了 [TLS 配置] 的 TiKV 集群中执行备份和恢复操作,你可以通过设置 `--ca`, `--cert` 和 `--key` 参数来指定客户端证书。 + +### 性能 + +TiKV-BR 的备份和恢复都是分布式的,在存储和网络没有达到瓶颈的时候,性能可以随着 TiKV 节点数量线性提升。以下是 TiKV-BR 的性能测试指标,以供参考。 +- TiKV 节点:4 核 CPU, 8G 内存,v6.4.0 +- PD 节点:4 核 CPU, 8G 内存,v6.4.0 +- TiKV-BR 节点:4 核 CPU, 8G 内存,v1.1.0 +- 存储容量: 50TB + +|指标|TiKV API v1|TiKV API v2| +|:-:|:-:|:-:| +|备份速度|每 TiKV 节点 40MB/s|每 TiKV 节点 40MB/s| +|恢复速度|每 TiKV 节点 70MB/s|每 TiKV 节点 70MB/s| +|性能影响|QPS 降低 20%,时延增加 20%|QPS 降低 20%,时延增加 20%| + +#### 性能调优 + +如果你希望减少备份对集群的影响,你可以开启 [auto-tune] 功能。开启该功能后,备份功能会在不过度影响集群正常业务的前提下,以最快的速度进行数据备份。详见[自动调节]。 +或者你也可以使用参数 `--ratelimit` 进行备份限速。 + + +[TiKV Backup & Restore (TiKV-BR)]: https://github.com/tikv/migration/tree/main/br +[TiUP]: https://tiup.io +[GitHub]: https://github.com/tikv/migration/releases +[操作系统及平台的要求]: https://docs.pingcap.com/zh/tidb/dev/hardware-and-software-requirements +[api-version]: ../api-v2 +[TiKV API v2]: ../api-v2 +[创建 TiKV-CDC 同步任务]: ../cdc/cdc-cn/#%E7%AE%A1%E7%90%86%E5%90%8C%E6%AD%A5%E4%BB%BB%E5%8A%A1-changefeed +[hex]: https://zh.wikipedia.org/wiki/%E5%8D%81%E5%85%AD%E8%BF%9B%E5%88%B6 +[escaped]: https://zh.wikipedia.org/wiki/%E8%BD%AC%E4%B9%89%E5%AD%97%E7%AC%A6 +[checksum]: ../../../develop/rawkv/checksum +[client-go]: https://github.com/tikv/client-go +[TTL]: ../ttl +[TLS 配置]: https://docs.pingcap.com/zh/tidb/dev/enable-tls-between-components +[auto-tune]: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file#enable-auto-tune-%E4%BB%8E-v54-%E7%89%88%E6%9C%AC%E5%BC%80%E5%A7%8B%E5%BC%95%E5%85%A5 +[自动调节]: https://docs.pingcap.com/zh/tidb/dev/br-auto-tune \ No newline at end of file diff --git a/content/docs/7.1/concepts/explore-tikv-features/backup-restore.md b/content/docs/7.1/concepts/explore-tikv-features/backup-restore.md new file 
mode 100644 index 00000000..6e9a5468 --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/backup-restore.md @@ -0,0 +1,220 @@ +--- +title: RawKV BR +description: How to backup and restore RawKV Data +menu: + "7.1": + parent: Features-7.1 + weight: 7 + identifier: RawKV BR-7.1 +--- + +This page introduces what's RawKV BR and how to use it. + +[中文使用手册] + +## RawKV Backup & Restore + +**[TiKV Backup & Restore (TiKV-BR)]** is a command-line tool for distributed backup and restoration of the TiKV cluster data. + +## Architecture + +{{< figure + src="/img/docs/tikv-br.png" + caption="TiKV-BR architecture" + number="1" >}} + +### Installation + +#### Install with TiUP +`tikv-br` is a component of [TiUP], so you can easily use `tikv-br` with [TiUP] as following: +```bash +tiup tikv-br:v1.1.0 +``` +If `tikv-br` is not installed before, TiUP will download it automatically. + +#### Install manually + +Please find the latest release and download the binary from [GitHub]. + +### Recommended Deployment Configuration +- In production environments, deploy `TiKV-BR` on a node with at least 4 cores CPU and 8 GB memory. Select an appropriate OS version by following [Linux OS version requirements]. + +- Save backup data to Amazon S3 or other shared storage, for example mounting a NFS on all `TiKV-BR` and `TiKV` nodes. + +- Allocate sufficient resources for backup and restoration. + +- TiKV-BR only supports raw data backup/restoration in TiKV cluster with version >= `5.0.0`. + +TiKV-BR, TiKV nodes, and the backup storage system should provide network bandwidth that is greater than the backup speed. If the target cluster is particularly large, the threshold of backup and restoration speed highly depends on the bandwidth of storage system. +The backup storage system should also provide sufficient write/read performance (IOPS). Otherwise, the IOPS might become a performance bottleneck during backup or restoration. +TiKV nodes need to have at least two additional spare CPU cores and disk bandwidth (related to `ratelimit` parameter) for backups. Otherwise, the backup might have an impact on the services running on the cluster. + +### Best practice +The following are some recommended operations for using `TiKV-BR` for backup and restoration: +- It is recommended that you perform the backup operation during off-peak hours to minimize the impact on applications. +- `TiKV-BR` supports restoration on clusters of different topologies. However, the online applications will be greatly impacted during the restoration operation. It is recommended that you perform restoration during the off-peak hours or use `ratelimit` to limit the rate. +- It is recommended that you execute multiple backup operations serially. Running different backup operations in parallel reduces backup performance and also affects the online application. +- It is recommended that you execute multiple restoration operations serially. Running different restoration operations in parallel increases Region conflicts and also reduces restoration performance. +- `TiKV-BR` supports checksum between `TiKV` cluster and backup files after backup or restoration with the config `--checksum=true`. Note that, if checksum is enabled, please make sure no data is changed or `TTL` expired in `TiKV` cluster during backup or restoration. +- TiKV-BR supports [api-version] conversion from V1 to V2 with config `--dst-api-version=V2`. Then restore the backup files to API v2 `TiKV` cluster. This is mainly used to upgrade from API V1 cluster to API v2 cluster. 
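+
+The last point above corresponds to a two-step flow. Below is a minimal sketch of the API v1 to API v2 migration; the PD addresses and the backup path are placeholders rather than values from the original examples:
+
+```bash
+# Back up the API v1 cluster, converting the backup files to the v2 format.
+tikv-br backup raw \
+    --pd="${V1_CLUSTER_PD}:2379" \
+    --storage="s3://backup-data/migration/" \
+    --dst-api-version=v2
+
+# Restore the converted backup into a new cluster that runs with `api-version = 2`.
+tikv-br restore raw \
+    --pd="${V2_CLUSTER_PD}:2379" \
+    --storage="s3://backup-data/migration/"
+```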
+
+### TiKV-BR Command Line Description
+A tikv-br command consists of sub-commands, options, and parameters.
+
+- Sub-command: the characters without `-` or `--`, including `backup`, `restore`, `raw` and `help`.
+- Option: the characters that start with `-` or `--`.
+- Parameter: the characters that immediately follow the sub-command or the option and are passed to it.
+
+#### Backup Raw Data
+To back up the raw data in the cluster, use the `tikv-br backup raw` command. To get help on this command, execute `tikv-br backup raw -h` or `tikv-br backup raw --help`.
+For example, back up the raw data in a TiKV cluster to the S3 `/backup-data/2022-09-16` directory.
+
+```bash
+export AWS_ACCESS_KEY_ID=${AWS_KEY_ID};
+export AWS_SECRET_ACCESS_KEY=${AWS_KEY};
+tikv-br backup raw \
+    --pd="${PDIP}:2379" \
+    --storage="s3://backup-data/2022-09-16/" \
+    --ratelimit=128 \
+    --dst-api-version=v2 \
+    --log-file="/tmp/br_backup.log" \
+    --gcttl=5m \
+    --start="a" \
+    --end="z" \
+    --format="raw"
+```
+Explanations for some options in the above command are as follows:
+- `backup`: Sub-command of `tikv-br`.
+- `raw`: Sub-command of `backup`.
+- `-s` or `--storage`: Storage of the backup files.
+- `"s3://backup-data/2022-09-16/"`: Parameter of `-s`. The backup files are saved in the S3 `/backup-data/2022-09-16/` directory.
+- `--ratelimit`: The maximum speed at which a backup operation is performed on each `TiKV` node.
+- `128`: The value of `--ratelimit`, in MiB/s.
+- `--pd`: Service address of `PD`.
+- `"${PDIP}:2379"`: Parameter of `--pd`.
+- `--dst-api-version`: The `api-version` of the backup files. See [API v2].
+- `v2`: Parameter of `--dst-api-version`. The options are `v1`, `v1ttl`, and `v2` (case insensitive). If `dst-api-version` is not specified, the `api-version` is the same as that of the TiKV cluster specified by `--pd`.
+- `gcttl`: The pause duration of GC. This can be used to make sure that the incremental data, from the beginning of the backup to [Creating a replication task], will NOT be deleted by GC. It defaults to 5 minutes.
+- `5m`: Parameter of `gcttl`. Its format is `number + unit`, e.g. `24h` means 24 hours and `60m` means 60 minutes.
+- `start`, `end`: The backup key range. It is left-closed and right-open: `[start, end)`.
+- `format`: Format of `start` and `end`. Supported formats are `raw`, [hex], and [escaped].
+
+A progress bar is displayed in the terminal during the backup. When the progress bar advances to 100%, the backup is complete. The progress bar is displayed as follows:
+```bash
+tikv-br backup raw \
+    --pd="${PDIP}:2379" \
+    --storage="s3://backup-data/2022-09-16/" \
+    --log-file=backupraw.log
+Backup Raw <---------/................................................> 17.12%.
+```
+
+After the backup finishes, the result message is displayed as follows:
+```bash
+[2022/09/20 18:01:10.125 +08:00] [INFO] [collector.go:67] ["Raw backup success summary"] [total-ranges=3] [ranges-succeed=3] [ranges-failed=0] [backup-total-regions=3] [total-take=5.050265883s] [backup-ts=436120585518448641] [total-kv=100000] [total-kv-size=108.7MB] [average-speed=21.11MB/s] [backup-data-size(after-compressed)=78.3MB]
+```
+Explanations for the above message are as follows:
+- `total-ranges`: Number of ranges that the whole backup task is split into. Equals `ranges-succeed` + `ranges-failed`.
+- `ranges-succeed`: Number of succeeded ranges.
+- `ranges-failed`: Number of failed ranges.
+- `backup-total-regions`: The number of TiKV Regions involved in the backup.
+- `total-take`: The backup duration.
+
+- `backup-ts`: The backup start timestamp. It takes effect only for API v2 TiKV clusters and can be used as the `start-ts` of `TiKV-CDC` when creating replication tasks. Refer to [Creating a replication task].
+- `total-kv`: Total number of key-value pairs in the backup files.
+- `total-kv-size`: Total size of the key-value pairs in the backup files. Note that this is the original size before compression.
+- `average-speed`: The backup speed, which is approximately equal to `total-kv-size` / `total-take`.
+- `backup-data-size(after-compressed)`: The backup file size.
+
+#### Restore Raw Data
+
+To restore raw data to the cluster, execute the `tikv-br restore raw` command. To get help on this command, execute `tikv-br restore raw -h` or `tikv-br restore raw --help`.
+For example, restore the raw backup files in the S3 `/backup-data/2022-09-16` directory to the `TiKV` cluster.
+
+```bash
+export AWS_ACCESS_KEY_ID=${AWS_KEY_ID};
+export AWS_SECRET_ACCESS_KEY=${AWS_KEY};
+tikv-br restore raw \
+    --pd="${PDIP}:2379" \
+    --storage="s3://backup-data/2022-09-16/" \
+    --ratelimit=128 \
+    --log-file=restoreraw.log
+```
+Explanations for some options in the above command are as follows:
+
+- `--ratelimit`: The maximum speed (in MiB/s) at which a restoration operation is performed on each `TiKV` node.
+- `--log-file`: Writes the TiKV-BR log to the `restoreraw.log` file.
+
+A progress bar is displayed in the terminal during the restoration. When the progress bar advances to 100%, the restoration is complete. The progress bar is displayed as follows:
+```bash
+tikv-br restore raw \
+    --pd="${PDIP}:2379" \
+    --storage="s3://backup-data/2022-09-16/" \
+    --ratelimit=128 \
+    --log-file=restoreraw.log
+Restore Raw <---------/...............................................> 17.12%.
+```
+
+After the restoration finishes, the result message is displayed as follows:
+```bash
+[2022/09/20 18:02:12.540 +08:00] [INFO] [collector.go:67] ["Raw restore success summary"] [total-ranges=3] [ranges-succeed=3] [ranges-failed=0] [restore-files=3] [total-take=950.460846ms] [restore-data-size(after-compressed)=78.3MB] [total-kv=100000] [total-kv-size=108.7MB] [average-speed=114.4MB/s]
+```
+Explanations for the above message are as follows:
+- `total-ranges`: Number of ranges that the whole restoration task is split into. Equals `ranges-succeed` + `ranges-failed`.
+- `ranges-succeed`: Number of successful ranges.
+- `ranges-failed`: Number of failed ranges.
+- `restore-files`: Number of restored files.
+- `total-take`: The restoration duration.
+- `total-kv`: Total number of restored key-value pairs.
+- `total-kv-size`: Total size of the restored key-value pairs. Note that this is the original size before compression.
+- `average-speed`: The restoration speed, which is approximately equal to `total-kv-size` / `total-take`.
+- `restore-data-size(after-compressed)`: The restoration file size.
+
+### Data Verification of Backup & Restore
+
+With the `--checksum=true` option, TiKV-BR can run a checksum comparison between the TiKV cluster and the backup files after a backup or restoration finishes. The checksum uses the [checksum] interface in the TiKV [client-go], which sends checksum requests to all TiKV Regions to calculate the checksum of all **VALID** data. The result is then compared with the checksum value of the backup files, which is calculated during the backup process.
+
+Please note that if data is stored in TiKV with [TTL] and expiration happens during a backup or restoration, the checksum persisted in the backup files will differ from the checksum of the TiKV cluster, so checksum should **NOT** be enabled in this scenario.
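+
+For example, to have TiKV-BR run this verification automatically, append the `--checksum=true` option described above to the backup and restoration commands shown earlier (the `${PDIP}` placeholder follows the earlier examples; skip the option when TTL expiration may occur):
+
+```bash
+# Backup with a post-backup checksum comparison.
+tikv-br backup raw --pd="${PDIP}:2379" --storage="s3://backup-data/2022-09-16/" --checksum=true
+
+# Restoration with a post-restoration checksum comparison.
+tikv-br restore raw --pd="${PDIP}:2379" --storage="s3://backup-data/2022-09-16/" --checksum=true
+```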
+ +### Security During Backup & Restoration + +TiKV-BR supports TLS if [TLS config] in TiKV cluster is enabled. + +Please specify the client certification with config `--ca`, `--cert` and `--key`. + +### Performance + +The backup and restoration are both distributed, so performance can increase linearly with the number of TiKV nodes, until the storage or network reach the limits. The following are some key metrics of TiKV-BR backup and restoration benchmark for reference. +- TiKV node: 4 cores CPU, 8 GB memory, v6.4.0 +- PD node: 4 cores CPU, 8 GB memory, v6.4.0 +- TiKV-BR node: 4 cores CPU, 8 GB memory, v1.1.0 +- Data volume: 50 TB + +|Metric|TiKV API v1|TiKV API v2| +|:-:|:-:|:-:| +|Backup speed|40MB/s per TiKV node|40MB/s per TiKV node| +|Restoration speed|70MB/s per TiKV node|70MB/s per TiKV node| +|Performance impact|20% on QPS/Latency|20% on QPS/Latency| + +#### Performance tuning + +If you want to reduce the business impact of backup tasks, you can enable the [auto-tune] feature. With this feature enabled, TiKV-BR performs backup tasks as fast as possible without excessively affecting the cluster. See [BR Auto-Tune] for details. + +Alternatively, you can limit the backup speed by using the parameter `--ratelimit` of tikv-br. + + +[中文使用手册]: ../backup-restore-cn +[TiKV Backup & Restore (TiKV-BR)]: https://github.com/tikv/migration/tree/main/br +[Linux OS version requirements]: https://docs.pingcap.com/tidb/dev/hardware-and-software-requirements#linux-os-version-requirements +[TiUP]: https://tiup.io +[GitHub]: https://github.com/tikv/migration/releases +[api-version]: ../api-v2 +[API v2]: ../api-v2 +[hex]: https://en.wikipedia.org/wiki/Hexadecimal +[escaped]: https://en.wikipedia.org/wiki/Escape_character +[Creating a replication task]: ../cdc/cdc#manage-replication-tasks-changefeed +[checksum]: ../../../develop/rawkv/checksum +[client-go]: https://github.com/tikv/client-go +[TTL]: ../ttl +[TLS config]: https://docs.pingcap.com/tidb/dev/enable-tls-between-components +[auto-tune]: https://docs.pingcap.com/tidb/stable/tikv-configuration-file#enable-auto-tune-new-in-v540 +[BR Auto-Tune]: https://docs.pingcap.com/tidb/dev/br-auto-tune \ No newline at end of file diff --git a/content/docs/7.1/concepts/explore-tikv-features/cas.md b/content/docs/7.1/concepts/explore-tikv-features/cas.md new file mode 100644 index 00000000..715d540a --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/cas.md @@ -0,0 +1,103 @@ +--- +title: CAS on RawKV +description: Compare And Swap +menu: + "7.1": + parent: Features-7.1 + weight: 4 + identifier: CAS on RawKV-7.1 +--- + +This page walks you through a simple demonstration of performing compare-and-swap (CAS) in TiKV. + +In RawKV, compare-and-swap (CAS) is an atomic instruction to achieve synchronization between multiple threads. + +Performing CAS is an atomic equivalent of executing the following code: + +``` +prevValue = get(key); +if (prevValue == request.prevValue) { + put(key, request.value); +} +return prevValue; +``` + +The atomicity guarantees that the new value is calculated based on the up-to-date information. If the value is updated by another thread at the same time, the write would fail. + +## Prerequisites + +Make sure that you have installed TiUP, jshell, downloaded tikv-client JAR files, and started a TiKV cluster according to [TiKV in 5 Minutes](../../tikv-in-5-minutes). + +## Verify CAS + +To verify whether CAS works, you can take the following steps. 
+ +### Step 1: Write the code to test CAS + +Save the following script to the `test_raw_cas.java` file. + +```java +import java.util.Optional; +import org.tikv.common.TiConfiguration; +import org.tikv.common.TiSession; +import org.tikv.raw.RawKVClient; +import org.tikv.shade.com.google.protobuf.ByteString; + +TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379"); +// enable AtomicForCAS when using RawKVClient.compareAndSet or RawKVClient.putIfAbsent +conf.setEnableAtomicForCAS(true); +TiSession session = TiSession.create(conf); +RawKVClient client = session.createRawClient(); + +ByteString key = ByteString.copyFromUtf8("Hello"); +ByteString value = ByteString.copyFromUtf8("CAS"); +ByteString newValue = ByteString.copyFromUtf8("NewValue"); + +// put +client.put(key, value); +System.out.println("put key=" + key.toStringUtf8() + " value=" + value.toStringUtf8()); + +// get +Optional result = client.get(key); +assert(result.isPresent()); +assert("CAS".equals(result.get().toStringUtf8())); +System.out.println("get key=" + key.toStringUtf8() + " result=" + result.get().toStringUtf8()); + +// cas +client.compareAndSet(key, Optional.of(value), newValue); +System.out.println("cas key=" + key.toStringUtf8() + " value=" + value.toStringUtf8() + " newValue=" + newValue.toStringUtf8()); + +// get +result = client.get(key); +assert(result.isPresent()); +assert("NewValue".equals(result.get().toStringUtf8())); +System.out.println("get key=" + key.toStringUtf8() + " result=" + result.get().toStringUtf8()); + +// close +client.close(); +session.close(); +``` + +### Step 2: Run the code + +```bash +jshell --class-path tikv-client-java.jar:slf4j-api.jar --startup test_raw_cas.java +``` + +The example output is as follows: + +```bash +put key=Hello value=CAS +get key=Hello result=CAS +cas key=Hello value=CAS newValue=NewValue +get key=Hello result=NewValue +``` + +As in the example output, after calling `compareAndSet`, the value `CAS` is replaced by `newValue`. + +{{< warning >}} + +- To ensure the linearizability of `CAS` when it is used together with `put`, `delete`, `batch_put`, or `batch_delete`, you must set `conf.setEnableAtomicForCAS(true)`. + +- To guarantee the atomicity of CAS, write operations such as `put` or `delete` in atomic mode take more resources. +{{< /warning >}} diff --git a/content/docs/7.1/concepts/explore-tikv-features/cdc/cdc-cn.md b/content/docs/7.1/concepts/explore-tikv-features/cdc/cdc-cn.md new file mode 100644 index 00000000..cd39a492 --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/cdc/cdc-cn.md @@ -0,0 +1,375 @@ +--- +title: RawKV CDC 使用手册 +description: 如何使用 RawKV Change Data Capture +menu: + "7.1": + parent: RawKV CDC-7.1 + weight: 1 + identifier: RawKV CDC CN-7.1 +--- + +本文是 RawKV CDC 的使用手册。 + +## RawKV CDC 使用手册 + +### 部署 + +{{< info >}} +支持 RawKV CDC 的最小 TiKV 版本为 [v6.2.0](https://docs.pingcap.com/zh/tidb/v6.2/release-6.2.0),并打开 [TiKV API V2](../../api-v2) +{{< /info >}} + +#### 使用 TiUP 部署 + +{{< info >}} +支持 TiKV-CDC 的最小 TiUP 版本为 v1.11.0 +{{< /info >}} + +##### 使用 TiUP 部署包含 TiKV-CDC 组件的全新 TiKV 集群 + +在使用 [TiUP] 部署全新的 TiKV 集群时,支持同时部署 TiKV-CDC 组件。只需在 TiUP 的拓扑配置中加入 TiKV-CDC 部分即可。可参考[模板](https://github.com/tikv/migration/blob/main/cdc/deployments/tikv-cdc/config-templates/topology.example.yaml)。 + +##### 使用 TiUP 在现有 TiKV 集群上新增 TiKV-CDC 组件 + +目前也支持在现有的 TiKV 集群上使用 TiUP 新增 TiKV-CDC 组件,操作步骤如下: + +1. 确认当前 TiKV 集群的版本 >= v6.2.0,并且已打开 [TiKV API V2]。 +2. 
根据[模板](https://github.com/tikv/migration/blob/main/cdc/deployments/tikv-cdc/config-templates/scale-out.example.yaml)创建扩容配置文件。 +3. 通过 `tiup cluster scale-out` 扩容 TiKV-CDC 组件(TiUP 扩容可参考 [使用 TiUP 扩容缩容 TiDB 集群])。 +```bash +tiup cluster scale-out scale-out.yaml +``` + +#### 手工部署 + +1. 部署两个 TiKV 集群,分别作为上游集群和下游集群。 +2. 启动 TiKV-CDC 集群,可包含一个或多个 TiKV-CDC server。TiKV-CDC server 的启动命令是 `tikv-cdc server --pd `。 +3. 通过以下命令启动同步任务:`tikv-cdc cli changefeed create --pd --sink-uri tikv://`。 + +#### TiKV-CDC server 启动参数 +* `addr`:TiKV-CDC 的监听地址,用于提供 HTTP API 和 Prometheus 查询,默认为 127.0.0.1:8600。 +* `advertise-addr`:TiKV-CDC 供客户端访问的外部开放地址。如果未设置,默认与 `addr` 相同。 +* `pd`:TiKV-CDC 监听的 PD 节点地址,多个地址用英文逗号(`,`)分隔。 +* `config`:可选项,指定 TiKV-CDC 使用的配置文件路径。 +* `data-dir`:可选项,指定 TiKV-CDC 存储运行时数据的目录,主要用于外部排序。建议确保该目录所在设备的可用空间大于等于 500 GiB。 +* `gc-ttl`:可选项,TiKV-CDC 在 PD 设置服务级别 GC safepoint 的 TTL (Time To Live) 时长。同时也是 TiKV-CDC 同步任务暂停的最大时长。单位为秒,默认值为 86400,即 24 小时。注意:TiKV-CDC 同步任务的暂停会影响集群 GC safepoint 的推进。`gc-ttl` 越大,同步任务可以暂停的时间越长,但同时需要保留更多的过期数据、并占用更多的存储空间。反之亦然。 +* `log-file`:可选项,TiKV-CDC 进程运行时日志的输出路径,未设置时默认为标准输出 (stdout)。 +* `log-level`:可选项,TiKV-CDC 进程运行时的日志路径,默认为 info。 +* `ca`:可选项,指定用于 TLS 连接的 CA 证书文件路径。仅支持 PEM 格式。 +* `cert`:可选项,指定用于 TLS 连接的证书文件路径。仅支持 PEM 格式。 +* `key`:可选项,指定用于 TLS 连接的私钥文件路径。仅支持 PEM 格式。 +* `cert-allowed-cn`:可选项,指定允许的调用者标识(即证书 Common Name,CN)。多个 CN 用英文逗号(`,`)分隔。 + +### 运维管理 + +#### 必备条件 + +运维管理需要使用 **tikv-cdc** 二进制可执行文件。Linux x86-64 下的二进制可执行文件可以通过 TiUP 获取(如下所示),或者从 [releases](https://github.com/tikv/migration/releases) 页面下载。其他平台需要从[源代码](https://github.com/tikv/migration/tree/main/cdc)编译。 + +```bash +tiup install tikv-cdc +tiup tikv-cdc cli --help +``` + +#### 管理 TiKV-CDC 服务进程 (`capture`) + +##### 查询 `capture` 列表 +```bash +tikv-cdc cli capture list --pd=http://192.168.100.122:2379 +``` +```bash +[ + { + "id": "07684765-52df-42a0-8dd1-a4e9084bb7c1", + "is-owner": false, + "address": "192.168.100.9:8600" + }, + { + "id": "aea1445b-c065-4dc5-be53-a445261f7fc2", + "is-owner": true, + "address": "192.168.100.26:8600" + }, + { + "id": "f29496df-f6b4-4c1e-bfa3-41a058ce2144", + "is-owner": false, + "address": "192.168.100.142:8600" + } +] +``` + +在以上结果中: + +* `id`:服务进程的 ID。 +* `is-owner`:表示该服务进程是否为 owner 节点。 +* `address`:该服务进程对外提供接口的地址。 + +如果要求使用 TLS 连接: +```bash +tikv-cdc cli capture list --pd=http://192.168.100.122:2379 --ca=$TLS_DIR/ca.pem --cert=$TLS_DIR/client.pem --key=$TLS_DIR/client-key.pem +``` + +在以上命令中: +* `ca`:指定 CA 证书文件路径。仅支持 PEM 格式。 +* `cert`:指定证书文件路径。仅支持 PEM 格式。 +* `key`:指定私钥文件路径。仅支持 PEM 格式。 + +#### 管理同步任务 (`changefeed`) + +##### 创建同步任务 +```bash +tikv-cdc cli changefeed create --pd=http://192.168.100.122:2379 --sink-uri="tikv://192.168.100.61:2379/" --changefeed-id="rawkv-replication-task" +``` +```bash +Create changefeed successfully! 
+ID: rawkv-replication-task +Info: {"sink-uri":"tikv://192.168.100.61:2379","opts":{},"create-time":"2022-07-20T15:35:47.860947953+08:00","start-ts":434714063103852547,"target-ts":0,"admin-job-type":0,"sort-engine":"unified","sort-dir":"","scheduler":{"type":"keyspan-number","polling-time":-1},"state":"normal","history":null,"error":null} +``` + +在以上命令和结果中: + +* `--changefeed-id`:同步任务的 ID,格式需要符合正则表达式 `^[a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*$`。如果不指定该 ID,TiKV-CDC 会自动生成一个 UUID(version 4 格式)作为 ID。 + +* `--sink-uri`:同步任务下游的地址,需要按照以下格式进行配置。目前 scheme 仅支持 `tikv`。此外,如果 URI 中包含特殊字符,需要以 URL 编码对特殊字符进行处理。 + +``` +[scheme]://[userinfo@][host]:[port][/path]?[query_parameters] +``` + +* `--start-ts`:指定 changefeed 的开始 TSO。TiKV-CDC 集群将从这个 TSO 开始拉取数据。默认为当前时间。 + +{{< info >}} +如果需要将现有集群中的存量数据同步到下游,请参考 [如何同步 TiKV 集群中的存量数据](#如何同步现有-tikv-集群中的存量数据)。 +{{< /info >}} + +##### Sink URI 配置 `tikv` +```bash +--sink-uri="tikv://192.168.100.61:2379/" +``` +| 参数 | 说明 | +|---------------------------|-----------------------------------------| +| 192.168.100.61:2379 | 下游 PD 地址。多个地址用英文逗号(`,`)分隔。 | + +如果要求使用 TLS 连接: +```bash +--sink-uri="tikv://192.168.100.61:2379/?ca-path=$TLS_DIR/ca.pem&cert-path=$TLS_DIR/client.pem&key-path=$TLS_DIR/client-key.pem" +``` +| 参数 | 说明 | +|---------------------------|----------------------------------------| +| 192.168.100.61:2379 | 下游 PD 地址。多个地址用英文逗号(`,`)分隔。 | +| ca-path | CA 证书文件路径,仅支持 PEM 格式。 | +| cert-path | 证书文件路径,仅支持 PEM 格式。 | +| key-path | 私钥文件路径,仅支持 PEM 格式 | + +##### 查询同步任务列表 +```bash +tikv-cdc cli changefeed list --pd=http://192.168.100.122:2379 +``` +```bash +[ + { + "id": "rawkv-replication-task", + "summary": { + "state": "normal", + "tso": 434715745556889877, + "checkpoint": "2022-07-20 17:22:45.900", + "error": null + } + } +] +``` + +在以上结果中: + +* `checkpoint` 表示 TiKV-CDC 已经将该时间点前的数据同步到了下游。 +* `state` 为该同步任务的状态: + * `normal`:正常同步 + * `stopped`:停止同步(手动暂停) + * `error`:停止同步(出错) + * `removed`:已删除任务(只在指定 --all 选项时才会显示该状态的任务) + + +##### 查询特定同步任务 +```bash +tikv-cdc cli changefeed query -s --changefeed-id rawkv-replication-task --pd=http://192.168.100.122:2379 +``` +```bash +{ + "state": "normal", + "tso": 434716089136185435, + "checkpoint": "2022-07-20 17:44:36.551", + "error": null +} +``` + +在以上命令和结果中: + +* `-s` 代表仅返回简化后的同步状态。 +* `state` 代表当前 changefeed 的同步状态,与 changefeed list 中的状态相同。 +* `tso` 代表当前 changefeed 中已经成功写入下游的最大 TSO。 +* `checkpoint` 代表当前 changefeed 中已经成功写入下游的最大 TSO 对应的时间。 +* `error` 记录当前 changefeed 是否有错误发生。 + + +```bash +tikv-cdc cli changefeed query --changefeed-id rawkv-replication-task --pd=http://192.168.100.122:2379 +``` +```bash +{ + "info": { + "sink-uri": "tikv://192.168.100.61:2379/", + "opts": {}, + "create-time": "2022-07-20T17:21:54.115625346+08:00", + "start-ts": 434715731964985345, + "target-ts": 0, + "admin-job-type": 0, + "sort-engine": "unified", + "sort-dir": "", + "config": { + "check-gc-safe-point": true, + "scheduler": { + "type": "keyspan-number", + "polling-time": -1 + }, + }, + "state": "normal", + "history": null, + "error": null, + "sync-point-enabled": false, + "sync-point-interval": 600000000000, + }, + "status": { + "resolved-ts": 434715754364928912, + "checkpoint-ts": 434715754103047044, + "admin-job-type": 0 + }, + "count": 0, + "task-status": [ + { + "capture-id": "aea1445b-c065-4dc5-be53-a445261f7fc2", + "status": { + "keyspans": { + "15137828009456710810": { + "start-ts": 434715731964985345, + "Start": "cg==", + "End": "cw==" + } + }, + "operation": {}, + "admin-job-type": 0 + } + } + ] +} +``` + +在以上结果中: + +* `info`:代表当前 changefeed 的同步配置。 +* 
`status`:代表当前 changefeed 的同步状态信息。 +* `resolved-ts`:代表当前 changefeed 从上游 TiKV 接收到的最大水位线(watermark)。**水位线**是一个时间戳,表示所有早于这个时间戳的 RawKV 数据,都已经从上游 TiKV 接收到了。 +* `checkpoint-ts`:代表当前 changefeed 中已经成功写入下游的最大水位线(watermark)。这个**水位线**表示所有早于这个时间戳的 RawKV 数据,都已经成功写入下游 TiKV。 +* `admin-job-type`:代表当前 changefeed 的状态: + * `0`:状态正常。 + * `1`:任务暂停,停止任务后所有同步 processor 会结束退出,同步任务的配置和同步状态都会保留,可以从 checkpoint-ts 恢复任务。 + * `2`:任务恢复,同步任务从 checkpoint-ts 继续同步。 + * `3`:任务已删除,所有同步 processor 结束退出,并清理同步任务配置信息。同步状态保留,只提供查询,没有其他实际功能。 +* `task-status` 代表当前 changefeed 所分配的各个同步子任务的状态信息。 + + +##### 停止同步任务 +```bash +tikv-cdc cli changefeed pause --changefeed-id rawkv-replication-task --pd=http://192.168.100.122:2379 +tikv-cdc cli changefeed list --pd=http://192.168.100.122:2379 +``` +```bash +[ + { + "id": "rawkv-replication-task", + "summary": { + "state": "stopped", + "tso": 434715759083521004, + "checkpoint": "2022-07-20 17:23:37.500", + "error": null + } + } +] +``` + +在以上命令中: + +* `--changefeed-id=uuid` 为需要操作的 `changefeed` ID。 + + +##### 恢复同步任务 +```bash +tikv-cdc cli changefeed resume --changefeed-id rawkv-replication-task --pd=http://192.168.100.122:2379 +tikv-cdc cli changefeed list --pd=http://192.168.100.122:2379 +``` +```bash +[ + { + "id": "rawkv-replication-task", + "summary": { + "state": "normal", + "tso": 434715759083521004, + "checkpoint": "2022-07-20 17:23:37.500", + "error": null + } + } +] +``` + +##### 删除同步任务 +```bash +tikv-cdc cli changefeed remove --changefeed-id rawkv-replication-task --pd=http://192.168.100.122:2379 +tikv-cdc cli changefeed list --pd=http://192.168.100.122:2379 +``` +```bash +[] +``` + +#### 查询同步子任务处理单元 (processor) +```bash +tikv-cdc cli processor list --pd=http://192.168.100.122:2379` +``` +```bash +[ + { + "changefeed_id": "rawkv-replication-task", + "capture_id": "07684765-52df-42a0-8dd1-a4e9084bb7c1" + } +] +``` + +## 常见问题 + +### 如何同步现有 TiKV 集群中的存量数据 + +首先通过 [TiKV-BR] 将存量数据复制到下游(需要部署 [NFS]、[S3] 等网络共享存储),然后创建 changefeed 进行后续的增量数据同步。 + +不建议使用 TiKV-CDC 直接同步存量数据,原因包括: + +- TiKV 集群垃圾回收的生命期(life time)较短(默认为 10 分钟),因此在大部分情况下,直接进行同步是不可行的。Changefeed 的 `start-ts` 不可小于 **GC Safe Point**。 +- 如果存量数据较大,通过 TiKV-CDC 同步较为低效,因为所有的存量数据都需要首先拉取并暂存在 TiKV-CDC 中,然后按时间戳大小排序,才能最后写入下游集群。相比之下,TiKV-BR 可以充分利用整个 TiKV 集群的资源,因为在备份和恢复的过程中,每个 region 直接向共享存储导出或者导入数据,并且不需要排序。 + +同步存量数据的步骤: + +1) 通过 TiKV-BR 备份上游集群数据,并指定足够长的 `--gcttl` 参数。参考 [备份 Raw 模式数据]。 +> 注意:`--gcttl` 需要包括数据备份时长、数据恢复时长、以及其他准备工作的时长。如果无法预计这些时长,可以临时停止 GC(`SET GLOBAL tidb_gc_enable = "OFF";`,见 [tidb_gc_enable]),并在 changefeed 启动后恢复(`SET GLOBAL tidb_gc_enable = "ON";`)。 + +2) 记录步骤 1 备份结果中的 `backup-ts`。 + +3) 将备份数据恢复到下游集群。参考 [恢复 Raw 模式数据]。 + +4) 创建 changefeed,并指定 `--start-ts=`。 + + +[TiKV API V2]: ../../api-v2 +[v6.2.0]: https://docs.pingcap.com/zh/tidb/v6.2/release-6.2.0 +[TiUP]: https://docs.pingcap.com/zh/tidb/stable/production-deployment-using-tiup +[使用 TiUP 扩容缩容 TiDB 集群]: https://docs.pingcap.com/zh/tidb/stable/scale-tidb-using-tiup +[TiKV-BR]: ../../backup-restore-cn +[NFS]: https://en.wikipedia.org/wiki/Network_File_System +[S3]: https://aws.amazon.com/s3/ +[备份 Raw 模式数据]: ../../backup-restore-cn/#%E5%A4%87%E4%BB%BD%E9%9B%86%E7%BE%A4-raw-%E6%A8%A1%E5%BC%8F%E6%95%B0%E6%8D%AE +[恢复 Raw 模式数据]: ../../backup-restore-cn/#%E6%81%A2%E5%A4%8D-raw-%E6%A8%A1%E5%BC%8F%E5%A4%87%E4%BB%BD%E6%95%B0%E6%8D%AE +[tidb_gc_enable]: https://docs.pingcap.com/zh/tidb/stable/system-variables#tidb_gc_enable-%E4%BB%8E-v50-%E7%89%88%E6%9C%AC%E5%BC%80%E5%A7%8B%E5%BC%95%E5%85%A5 diff --git a/content/docs/7.1/concepts/explore-tikv-features/cdc/cdc.md 
b/content/docs/7.1/concepts/explore-tikv-features/cdc/cdc.md new file mode 100644 index 00000000..ebb3ee10 --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/cdc/cdc.md @@ -0,0 +1,400 @@
+---
+title: RawKV CDC
+description: How to use RawKV Change Data Capture
+menu:
+  "7.1":
+    parent: Features-7.1
+    weight: 8
+    identifier: RawKV CDC-7.1
+---
+
+This page introduces what RawKV Change Data Capture is and how to use it.
+
+[中文使用手册]
+
+## RawKV Change Data Capture
+
+**RawKV Change Data Capture** (*abbr.* **RawKV CDC**) is a feature that provides the [Change Data Capture] capability for RawKV to meet high availability requirements.
+
+With RawKV CDC, you can build a storage system with **Cross Cluster Replication** to implement financial-level disaster recovery.
+
+{{< info >}}
+To use RawKV CDC, you need to enable [TiKV API V2](../../api-v2) and deploy a **TiKV-CDC** cluster. The minimal required version of TiKV is [v6.2.0](https://docs.pingcap.com/tidb/v6.2/release-6.2.0).
+{{< /info >}}
+
+{{< figure
+    src="/img/docs/rawkv-cdc.png"
+    caption="RawKV CDC"
+    number="1" >}}
+
+## TiKV-CDC
+
+**TiKV-CDC** is [TiKV](https://docs.pingcap.com/tidb/dev/tikv-overview)'s change data capture framework. It supports replicating change data to another TiKV cluster.
+
+It is forked from [TiCDC](https://github.com/pingcap/tiflow/blob/master/README_TiCDC.md), but focuses on NoSQL scenarios that use TiKV as a key-value store.
+
+{{< figure
+    src="/img/docs/rawkv-cdc-arch-simple.png"
+    caption="TiKV-CDC Architecture"
+    number="2" >}}
+
+## Instruction Manual
+
+### Deployment
+
+#### Deploy by TiUP
+
+{{< info >}}
+[TiUP](https://tiup.io) >= v1.11.0 is required.
+{{< /info >}}
+
+##### Deploy a new TiDB/TiKV cluster including TiKV-CDC
+
+When you deploy a new TiDB/TiKV cluster using TiUP, you can also deploy TiKV-CDC at the same time. You only need to add the `kvcdc_servers` section in the initialization configuration file that TiUP uses to start the TiDB/TiKV cluster. Please refer to the configuration [template](https://github.com/tikv/migration/blob/main/cdc/deployments/tikv-cdc/config-templates/topology.example.yaml).
+
+##### Add TiKV-CDC to an existing TiDB/TiKV cluster
+
+You can also use TiUP to add the TiKV-CDC component to an existing TiDB/TiKV cluster. Take the following steps:
+
+1. Make sure that the current TiDB/TiKV version >= 6.2.0 and [TiKV API V2] is enabled.
+2. Prepare a scale-out configuration file. Refer to the [template](https://github.com/tikv/migration/blob/main/cdc/deployments/tikv-cdc/config-templates/scale-out.example.yaml).
+3. Scale out by `tiup cluster scale-out`. Also refer to [Scale a TiDB Cluster Using TiUP](https://docs.pingcap.com/tidb/stable/scale-tidb-using-tiup).
+```bash
+tiup cluster scale-out scale-out.yaml
+```
+
+#### Deploy manually
+
+1. Set up two TiKV clusters, one as the upstream and another as the downstream.
+2. Start a TiKV-CDC cluster, which contains one or more TiKV-CDC servers. The command to start a TiKV-CDC server is `tikv-cdc server --pd <upstream_pd_endpoints>`.
+3. Start a replication changefeed by `tikv-cdc cli changefeed create --pd <upstream_pd_endpoints> --sink-uri tikv://<downstream_pd_endpoints>`.
+
+#### Arguments for starting TiKV-CDC server
+* `addr`: The listening address of TiKV-CDC, which is the HTTP API address and the Prometheus address of the TiKV-CDC service. The default value is 127.0.0.1:8600.
+* `advertise-addr`: The advertised address via which clients access TiKV-CDC. If unspecified, the value is the same as `addr`.
+* `pd`: A comma-separated list of PD endpoints.
+* `config`: The address of the configuration file that TiKV-CDC uses (optional). +* `data-dir`: Specifies the directory that TiKV-CDC uses to store temporary files for sorting. It is recommended to ensure that the free disk space for this directory is greater than or equal to 500 GiB (optional). +* `gc-ttl`: The TTL (Time To Live, in seconds) of the service level `GC safepoint` in PD set by TiKV-CDC (optional). It's the duration that replication tasks can suspend, defaults to 86400, i.e. 24 hours. Note that suspending of replication task will affect the progress of TiKV garbage collection. The longer of `gc-ttl`, the longer a changefeed can be paused, but at the same time more obsolete data will be kept and larger space will be occupied. Vice versa. +* `log-file`: The path to which logs are output when the TiKV-CDC process is running (optional). If this parameter is not specified, logs are written to the standard output (stdout). +* `log-level`: The log level when the TiKV-CDC process is running (optional). The default value is "info". +* `ca`: The path of the CA certificate file in PEM format for TLS connection (optional). +* `cert`: The path of the certificate file in PEM format for TLS connection (optional). +* `key`: The path of the private key file in PEM format for TLS connection (optional). +* `cert-allowed-cn`: Specifies to verify caller's identity (certificate Common Name, optional). Use comma to separate multiple CN. + +### Maintenance + +#### Prerequisites + +A **tikv-cdc** binary is required for maintenance work. Pre-built binary for Linux x86-64 can be obtained by TiUP as following, or downloaded from [releases](https://github.com/tikv/migration/releases). For other platforms, you have to build from [source](https://github.com/tikv/migration/tree/main/cdc). + +```bash +tiup install tikv-cdc +tiup tikv-cdc cli --help +``` + +#### Manage TiKV-CDC service (`capture`) + +##### Query the `capture` list +```bash +tikv-cdc cli capture list --pd=http://192.168.100.122:2379 +``` +```bash +[ + { + "id": "07684765-52df-42a0-8dd1-a4e9084bb7c1", + "is-owner": false, + "address": "192.168.100.9:8600" + }, + { + "id": "aea1445b-c065-4dc5-be53-a445261f7fc2", + "is-owner": true, + "address": "192.168.100.26:8600" + }, + { + "id": "f29496df-f6b4-4c1e-bfa3-41a058ce2144", + "is-owner": false, + "address": "192.168.100.142:8600" + } +] +``` + +In the result above: + +* `id`: The ID of the service process. +* `is-owner`: Indicates whether the service process is the owner node. +* `address`: The address to access to. + +If TLS is required: +```bash +tikv-cdc cli capture list --pd=http://192.168.100.122:2379 --ca=$TLS_DIR/ca.pem --cert=$TLS_DIR/client.pem --key=$TLS_DIR/client-key.pem +``` + +In the command above: +* `ca`: Specifies the path of the CA certificate file in PEM format for TLS connection. +* `cert`: Specifies the path of the certificate file in PEM format for TLS connection. +* `key`: Specifies the path of the private key file in PEM format for TLS connection. + +#### Manage Replication Tasks (`changefeed`) + +##### Create a replication task +```bash +tikv-cdc cli changefeed create --pd=http://192.168.100.122:2379 --sink-uri="tikv://192.168.100.61:2379/" --changefeed-id="rawkv-replication-task" --start-ts=434716089136185435 +``` +```bash +Create changefeed successfully! 
+ID: rawkv-replication-task +Info: {"sink-uri":"tikv://192.168.100.61:2379","opts":{},"create-time":"2022-07-20T15:35:47.860947953+08:00","start-ts":434714063103852547,"target-ts":0,"admin-job-type":0,"sort-engine":"unified","sort-dir":"","scheduler":{"type":"keyspan-number","polling-time":-1},"state":"normal","history":null,"error":null} +``` + +In the command and result above: + +* `--changefeed-id`: The ID of the replication task. The format must match the `^[a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*$` regular expression. If this ID is not specified, TiKV-CDC automatically generates a UUID (the version 4 format) as the ID. + +* `--sink-uri`: The downstream address of the replication task. Configure `--sink-uri` according to the following format. Currently, the scheme supports `tikv` only. Besides, when a URI contains special characters, you need to process these special characters using URL encoding. + +``` +[scheme]://[userinfo@][host]:[port][/path]?[query_parameters] +``` + +* `--start-ts`: Specifies the starting TSO of the changefeed. TiKV-CDC will replicate RawKV entries starting from this TSO. The default value is the current time. + +{{< info >}} +Refer to [How to replicate TiKV cluster with existing data](#how-to-replicate-tikv-cluster-with-existing-data) if the replication is deployed on a existing cluster. +{{< /info >}} + +##### Configure sink URI with `tikv` +```bash +--sink-uri="tikv://192.168.100.61:2379/" +``` +| Parameter/Parameter Value | Description | +|---------------------------|--------------------------------------------------------------------------------| +| 192.168.100.61:2379 | The endpoints of the downstream PD. Multiple addresses are separated by comma. | + +If TLS is required: +```bash +--sink-uri="tikv://192.168.100.61:2379/?ca-path=$TLS_DIR/ca.pem&cert-path=$TLS_DIR/client.pem&key-path=$TLS_DIR/client-key.pem" +``` +| Parameter/Parameter Value | Description | +|---------------------------|--------------------------------------------------------------------------------| +| 192.168.100.61:2379 | The endpoints of the downstream PD. Multiple addresses are separated by comma. | +| ca-path | The path of the CA certificate file in PEM format. | +| cert-path | The path of the certificate file in PEM format. | +| key-path | The path of the private key file in PEM format. | + +##### Query the replication task list +```bash +tikv-cdc cli changefeed list --pd=http://192.168.100.122:2379 +``` +```bash +[ + { + "id": "rawkv-replication-task", + "summary": { + "state": "normal", + "tso": 434715745556889877, + "checkpoint": "2022-07-20 17:22:45.900", + "error": null + } + } +] +``` + +In the result above: + +* `checkpoint` indicates that TiKV-CDC has already replicated data before this time point to the downstream. +* `state` indicates the state of the replication task. + * `normal`: The replication task runs normally. + * `stopped`: The replication task is stopped (manually paused). + * `error`: The replication task is stopped (by an error). + * `removed`: The replication task is removed. Tasks of this state are displayed only when you have specified the `--all` option. To see these tasks when this option is not specified, execute the `changefeed query` command. 
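+
+As mentioned above, changefeeds in the `removed` state are listed only when the `--all` option is specified. A minimal sketch, reusing the PD address from the examples above:
+
+```bash
+# Include removed changefeeds in the listing
+tikv-cdc cli changefeed list --all --pd=http://192.168.100.122:2379
+```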
+ + +##### Query a specific replication task +```bash +tikv-cdc cli changefeed query -s --changefeed-id=rawkv-replication-task --pd=http://192.168.100.122:2379 +``` +```bash +{ + "state": "normal", + "tso": 434716089136185435, + "checkpoint": "2022-07-20 17:44:36.551", + "error": null +} +``` + +In the command and result above: + +* `-s` shows simplified result. +* `state` is the replication state of the current changefeed. Each state must be consistent with the state in changefeed list. +* `tso` represents the largest TSO in the current changefeed that has been successfully replicated to the downstream. +* `checkpoint` represents the corresponding time of the largest TSO in the current changefeed that has been successfully replicated to the downstream. +* `error` records whether an error has occurred in the current changefeed. + + +```bash +tikv-cdc cli changefeed query --changefeed-id=rawkv-replication-task --pd=http://192.168.100.122:2379 +``` +```bash +{ + "info": { + "sink-uri": "tikv://192.168.100.61:2379/", + "opts": {}, + "create-time": "2022-07-20T17:21:54.115625346+08:00", + "start-ts": 434715731964985345, + "target-ts": 0, + "admin-job-type": 0, + "sort-engine": "unified", + "sort-dir": "", + "config": { + "check-gc-safe-point": true, + "scheduler": { + "type": "keyspan-number", + "polling-time": -1 + }, + }, + "state": "normal", + "history": null, + "error": null, + "sync-point-enabled": false, + "sync-point-interval": 600000000000, + }, + "status": { + "resolved-ts": 434715754364928912, + "checkpoint-ts": 434715754103047044, + "admin-job-type": 0 + }, + "count": 0, + "task-status": [ + { + "capture-id": "aea1445b-c065-4dc5-be53-a445261f7fc2", + "status": { + "keyspans": { + "15137828009456710810": { + "start-ts": 434715731964985345, + "Start": "cg==", + "End": "cw==" + } + }, + "operation": {}, + "admin-job-type": 0 + } + } + ] +} +``` + +In the result above: + +* `info` is the replication configuration of the queried changefeed. +* `status` is the replication state of the queried changefeed. +* `resolved-ts`: The largest watermark received from upstream in the current changefeed. The **watermark** is a timestamp indicating that all RawKV entries earlier than it have been received. +* `checkpoint-ts`: The largest watermark written to downstream successfully in the current changefeed. +* `admin-job-type`: The status of a changefeed: + * `0`: The state is normal. + * `1`: The task is paused. When the task is paused, all replicated processors exit. The configuration and the replication status of the task are retained, so you can resume the task from `checkpoint-ts`. + * `2`: The task is resumed. The replication task resumes from `checkpoint-ts`. + * `3`: The task is removed. When the task is removed, all replicated processors are ended, and the configuration information of the replication task is cleared up. Only the replication status is retained for later queries. +* `task-status` indicates the state of each replication sub-task in the queried changefeed. 
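+
+If you need to translate a TSO such as `checkpoint-ts` into wall-clock time (for example, to estimate replication lag), the sketch below may help. It assumes the standard TiKV TSO layout, in which the upper bits are a physical timestamp in milliseconds and the lowest 18 bits are a logical counter, and it uses GNU `date`:
+
+```bash
+# checkpoint-ts taken from the query result above
+tso=434715754103047044
+# Strip the 18-bit logical part to get the physical timestamp in milliseconds
+physical_ms=$(( tso >> 18 ))
+date -d "@$(( physical_ms / 1000 ))"
+```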
+
+
+##### Pause a replication task
+```bash
+tikv-cdc cli changefeed pause --changefeed-id=rawkv-replication-task --pd=http://192.168.100.122:2379
+tikv-cdc cli changefeed list --pd=http://192.168.100.122:2379
+```
+```bash
+[
+  {
+    "id": "rawkv-replication-task",
+    "summary": {
+      "state": "stopped",
+      "tso": 434715759083521004,
+      "checkpoint": "2022-07-20 17:23:37.500",
+      "error": null
+    }
+  }
+]
+```
+
+In the command above:
+
+* `--changefeed-id=uuid` represents the ID of the changefeed that corresponds to the replication task you want to pause.
+
+
+##### Resume a replication task
+```bash
+tikv-cdc cli changefeed resume --changefeed-id=rawkv-replication-task --pd=http://192.168.100.122:2379
+tikv-cdc cli changefeed list --pd=http://192.168.100.122:2379
+```
+```bash
+[
+  {
+    "id": "rawkv-replication-task",
+    "summary": {
+      "state": "normal",
+      "tso": 434715759083521004,
+      "checkpoint": "2022-07-20 17:23:37.500",
+      "error": null
+    }
+  }
+]
+```
+
+##### Remove a replication task
+```bash
+tikv-cdc cli changefeed remove --changefeed-id=rawkv-replication-task --pd=http://192.168.100.122:2379
+tikv-cdc cli changefeed list --pd=http://192.168.100.122:2379
+```
+```bash
+[]
+```
+
+#### Query processing units of replication sub-tasks (processor)
+```bash
+tikv-cdc cli processor list --pd=http://192.168.100.122:2379
+```
+```bash
+[
+  {
+    "changefeed_id": "rawkv-replication-task",
+    "capture_id": "07684765-52df-42a0-8dd1-a4e9084bb7c1"
+  }
+]
+```
+
+## FAQs
+
+### How to replicate TiKV cluster with existing data
+
+Use [TiKV-BR] to migrate the existing data to the downstream cluster (network shared storage, e.g. [NFS] or [S3], is required), and then start a changefeed for incremental replication.
+
+Replicating existing data with TiKV-CDC is not recommended because:
+
+- First, because the lifetime of garbage collection is short (10 minutes by default), in most circumstances the replication is not feasible. You cannot create a changefeed whose `start-ts` is earlier than the **GC Safe Point**.
+- Second, if there is a large amount of existing data, replication by TiKV-CDC is inefficient, because all existing data must be gathered, held, and sorted in TiKV-CDC before it is finally written to the downstream. By contrast, TiKV-BR can utilize the power of the whole cluster, as all Regions are exported to and imported from the shared storage directly.
+
+To replicate a TiKV cluster with existing data:
+
+1) Back up the upstream cluster using TiKV-BR with a long enough `--gcttl`. See [Backup Raw Data] for more details.
+> NOTE: the value of `--gcttl` should cover the duration of backup, restoration, and other preparation work. If you are not sure about the value of `--gcttl`, you can disable GC temporarily (`SET GLOBAL tidb_gc_enable = "OFF";`, see [tidb_gc_enable]) and enable it again after the changefeed has started (`SET GLOBAL tidb_gc_enable = "ON";`).
+
+2) Record the `backup-ts` from the backup result in *Step 1*.
+
+3) Restore the data to the downstream cluster. Refer to [Restore Raw Data].
+
+4) Create a changefeed with `--start-ts=`.
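+
+Put together, the procedure might look like the following sketch. The `tikv-br` subcommands and flags follow the [Backup Raw Data] and [Restore Raw Data] pages; the PD addresses reuse the earlier examples, while the storage path, the `--gcttl` value, and `<backup-ts>` are placeholders to replace with your own values:
+
+```bash
+# 1) Back up the upstream cluster with a generous --gcttl (in seconds)
+tikv-br backup raw --pd "192.168.100.122:2379" --storage "s3://backup-bucket/rawkv" --gcttl 18000
+
+# 2) Record the backup-ts printed in the backup summary
+
+# 3) Restore the backup into the downstream cluster
+tikv-br restore raw --pd "192.168.100.61:2379" --storage "s3://backup-bucket/rawkv"
+
+# 4) Start incremental replication from the recorded backup-ts
+tikv-cdc cli changefeed create --pd=http://192.168.100.122:2379 \
+    --sink-uri="tikv://192.168.100.61:2379/" --start-ts=<backup-ts>
+```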
+ + +[Change Data Capture]: https://en.wikipedia.org/wiki/Change_data_capture +[TiKV API V2]: ../../api-v2 +[v6.2.0]: https://docs.pingcap.com/tidb/v6.2/release-6.2.0 +[TiUP]: https://tiup.io +[TiKV-BR]: ../../backup-restore +[NFS]: https://en.wikipedia.org/wiki/Network_File_System +[S3]: https://aws.amazon.com/s3/ +[Backup Raw Data]: ../../backup-restore/#backup-raw-data +[Restore Raw Data]: ../../backup-restore/#restore-raw-data +[tidb_gc_enable]: https://docs.pingcap.com/tidb/stable/system-variables#tidb_gc_enable-new-in-v50 +[中文使用手册]: ../cdc-cn diff --git a/content/docs/7.1/concepts/explore-tikv-features/distributed-transaction.md b/content/docs/7.1/concepts/explore-tikv-features/distributed-transaction.md new file mode 100644 index 00000000..63b01dbb --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/distributed-transaction.md @@ -0,0 +1,173 @@ +--- +title: Distributed Transaction +description: How transaction works on TxnKV +menu: + "7.1": + parent: Features-7.1 + weight: 5 + identifier: Distributed Transaction-7.1 +--- + +This chapter walks you through a simple demonstration of how TiKV's distributed transaction works. + +## Prerequisites + +Before you start, ensure that you have set up a TiKV cluster and installed the `tikv-client` Python package according to [TiKV in 5 Minutes](../../tikv-in-5-minutes). + +{{< warning >}} +TiKV Java client's Transaction API has not been released yet, so the Python client is used in this example. +{{< /warning >}} + +## Test snapshot isolation + +Transaction isolation is one of the foundations of database transaction processing. Isolation is one of the four key properties of a transaction (commonly referred as ACID). + +TiKV implements [Snapshot Isolation (SI)](https://en.wikipedia.org/wiki/Snapshot_isolation) consistency, which means that: + +- all reads made in a transaction will see a consistent snapshot of the database (in practice, TiKV Client reads the last committed values that exist since TiKV Client has started); +- the transaction will successfully commit only if the updates that a transaction has made do not conflict with the concurrent updates made by other transactions since that snapshot. + +The following example shows how to test TiKV's snapshot isolation. + +Save the following script to file `test_snapshot_isolation.py`. 
+ +```python +from tikv_client import TransactionClient + +client = TransactionClient.connect("127.0.0.1:2379") + +# clean +txn1 = client.begin() +txn1.delete(b"k1") +txn1.delete(b"k2") +txn1.commit() + +# put k1 & k2 without commit +txn2 = client.begin() +txn2.put(b"k1", b"Snapshot") +txn2.put(b"k2", b"Isolation") + +# get k1 & k2 returns nothing +# cannot read the data before transaction commit +snapshot1 = client.snapshot(client.current_timestamp()) +print(snapshot1.batch_get([b"k1", b"k2"])) + +# commit txn2 +txn2.commit() + +# get k1 & k2 returns nothing +# still cannot read the data after transaction commit +# because snapshot1's timestamp < txn2's commit timestamp +# snapshot1 can see a consistent snapshot of the database +print(snapshot1.batch_get([b"k1", b"k2"])) + +# can read the data finally +# because snapshot2's timestamp > txn2's commit timestamp +snapshot2 = client.snapshot(client.current_timestamp()) +print(snapshot2.batch_get([b"k1", b"k2"])) +``` + +Run the test script + +```bash +python3 test_snapshot_isolation.py + +[] +[] +[(b'k1', b'Snapshot'), (b'k2', b'Isolation')] +``` + +From the above example, you can find that `snapshot1` cannot read the data before and after `txn2` is commited. This indicates that `snapshot1` can see a consistent snapshot of the database. + +## Try optimistic transaction model + +TiKV supports distributed transactions using either pessimistic or optimistic transaction models. + +TiKV uses the optimistic transaction model by default. With optimistic transactions, conflicting changes are detected as part of a transaction commit. This helps improve the performance when concurrent transactions infrequently modify the same rows, because the process of acquiring row locks can be skipped. + +The following example shows how to test TiKV with optimistic transaction model. + +Save the following script to file `test_optimistic.py`. + +```python +from tikv_client import TransactionClient + +client = TransactionClient.connect("127.0.0.1:2379") + +# clean +txn1 = client.begin(pessimistic=False) +txn1.delete(b"k1") +txn1.delete(b"k2") +txn1.commit() + +# create txn2 and put k1 & k2 +txn2 = client.begin(pessimistic=False) +txn2.put(b"k1", b"Optimistic") +txn2.put(b"k2", b"Mode") + +# create txn3 and put k1 +txn3 = client.begin(pessimistic=False) +txn3.put(b"k1", b"Optimistic") + +# txn2 commit successfully +txn2.commit() + +# txn3 commit failed because of conflict +# with optimistic transactions conflicting changes are detected when the transaction commits +txn3.commit() +``` + +Run the test script + +```bash +python3 test_optimistic.py + +Exception: KeyError WriteConflict +``` + +From the above example, you can find that with optimistic transactions, conflicting changes are detected when the transaction commits. + +## Try pessimistic transaction model + +In the optimistic transaction model, transactions might fail to be committed because of write–write conflict in heavy contention scenarios. In the case that concurrent transactions frequently modify the same rows (a conflict), pessimistic transactions might perform better than optimistic transactions. + +The following example shows how to test TiKV with pessimistic transaction model. + +Save the following script to file `test_pessimistic.py`. 
+ +```python +from tikv_client import TransactionClient + +client = TransactionClient.connect("127.0.0.1:2379") + +# clean +txn1 = client.begin(pessimistic=True) +txn1.delete(b"k1") +txn1.delete(b"k2") +txn1.commit() + +# create txn2 +txn2 = client.begin(pessimistic=True) + +# put k1 & k2 +txn2.put(b"k1", b"Pessimistic") +txn2.put(b"k2", b"Mode") + +# create txn3 +txn3 = client.begin(pessimistic=True) + +# put k1 +# txn3 put data failed because of conflict +# with pessimistic transactions conflicting changes are detected when writing data +txn3.put(b"k1", b"Pessimistic") +``` + +Run the test script + +```bash +python3 test_pessimistic.py + +Exception: KeyError +``` + +From the above example, you can find that with pessimistic transactions, conflicting changes are detected at the moment of data writing. diff --git a/content/docs/7.1/concepts/explore-tikv-features/fault-tolerance.md b/content/docs/7.1/concepts/explore-tikv-features/fault-tolerance.md new file mode 100644 index 00000000..06c4da1d --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/fault-tolerance.md @@ -0,0 +1,234 @@ +--- +title: Fault Tolerance and Recovery +description: Learn how TiKV recovers from failures. +menu: + "7.1": + parent: Features-7.1 + weight: 2 + identifier: Fault Tolerance and Recovery-7.1 +--- + +This document walks you through a demonstration of how TiKV recovers from failures and continues providing services when some nodes fail. You can follow the steps of this demonstration and perform operations on your own. In this way, you will have a hands-on experience of the fault tolerance feature of TiKV. + +The demonstration consists of two experiments: a single-node failure simulation, where one node is taken offline, and then a two-node failure, where two TiKV nodes are simultaneously taken offline. In both failures, the cluster repairs itself by re-replicating missing data to other nodes, and you can see how the cluster continues running uninterrupted. + +The process is as follows: + +1. [Prepare a TiKV cluster for test](#prepare-a-tikv-cluster-for-test). +2. [Run a workload against TiKV](#run-a-workload-against-tikv). +3. [Experiment 1: Simulate a single-node failure](#experiment-1-simulate-a-single-node-failure). +4. [Experiment 2: Simulate two simultaneous node failures](#experiment-2-simulate-two-simultaneous-node-failures). +5. [Clean up the test cluster](#clean-up-the-test-cluster). + +## Prepare a TiKV cluster for test + +Before the process of failure simulation begins, the following requirements are already met: + ++ [TiUP](https://github.com/pingcap/tiup) has been installed (v1.5.2 or later) as described in [TiKV in 5 Minutes](../../tikv-in-5-minutes). ++ [client-py](https://github.com/tikv/client-py) has been installed. It is used to interact with the TiKV cluster. + +### Step 1. Start a six-node cluster + +Use the `tiup playground` command to start a six-node local TiKV cluster: + +```sh +tiup playground --mode tikv-slim --kv 6 +``` + +The output of this command will show the components' addresses. These addresses will be used in the following steps. + +``` +Playground Bootstrapping... +Start pd instance +Start tikv instance +Start tikv instance +Start tikv instance +Start tikv instance +Start tikv instance +Start tikv instance +PD client endpoints: [127.0.0.1:2379] +To view the Prometheus: http://127.0.0.1:44549 +To view the Grafana: http://127.0.0.1:3000 +``` + +{{< info >}} +Each Region has three replicas according to the default configuration. +{{< /info >}} + +### Step 2. 
Import data to TiKV + +Start a new terminal session, and use [go-ycsb](https://github.com/pingcap/go-ycsb) to launch a workload of writing data to the TiKV cluster. + +1. Clone `go-ycsb` from GitHub. + + ```sh + git clone https://github.com/pingcap/go-ycsb.git + ``` + +2. Build the application from the source. + + ```sh + make + ``` + +3. Load a workload using `go-ycsb` with **10000** keys into the TiKV cluster. + + ```sh + ./bin/go-ycsb load tikv -P workloads/workloada -p tikv.pd="127.0.0.1:2379" -p tikv.type="raw" -p recordcount=1000000 + ``` + + The expected output is as follows: + + ``` + Run finished, takes 11.722575701s + INSERT - Takes(s): 11.7, Count: 10000, OPS: 855.2, Avg(us): 18690, Min(us): 11262, Max(us): 61304, 99th(us): 36000, 99.9th(us): 58000, 99.99th(us): 62000 + ``` + +### Step 3: Verify the data import + +Use the client-py tool to verify the data imported in the last step. Note that the Python 3.5+ REPL environment is required for such verification. It is expected that the key count in the output matches the `recordcount` in the `go-ycsb` command in the previous step. + +```python +>>> from tikv_client import RawClient +>>> client = RawClient.connect("127.0.0.1:2379") +>>> len(client.scan_keys(None, None, 10000)) +10000 +``` + +The evaluation of the last expression should be **10000**, as the `recordcount` has been specified in the `go-ycsb` command. + +## Run a workload against TiKV + +### Step 1. Run a sample workload + +Enter the source directory of `go-ycsb` and use the following command to run the `workloada` from the YCSB benchmark. + +`workloada` simulates multiple client connections and performs a mix of reads (50%) and writes (50%) per connection. + +```sh +./bin/go-ycsb run tikv -P workloads/workloada -p tikv.pd="127.0.0.1:2379" -p tikv.type="raw" -p tikv.conncount=16 -p threadcount=16 -p recordcount=10000 -p operationcount=1000000 +``` + +Per-operation statistics are printed to the standard output every second. + +``` +... +READ - Takes(s): 10.0, Count: 7948, OPS: 796.2, Avg(us): 395, Min(us): 72, Max(us): 20545, 99th(us): 2000, 99.9th(us): 19000, 99.99th(us): 21000 +UPDATE - Takes(s): 10.0, Count: 7945, OPS: 796.8, Avg(us): 19690, Min(us): 11589, Max(us): 40429, 99th(us): 34000, 99.9th(us): 41000, 99.99th(us): 41000 +READ - Takes(s): 20.0, Count: 15858, OPS: 793.6, Avg(us): 380, Min(us): 68, Max(us): 20545, 99th(us): 2000, 99.9th(us): 3000, 99.99th(us): 21000 +UPDATE - Takes(s): 20.0, Count: 15799, OPS: 791.1, Avg(us): 19834, Min(us): 10505, Max(us): 41090, 99th(us): 35000, 99.9th(us): 40000, 99.99th(us): 41000 +... +``` + +{{< info >}} +This workload above runs for several minutes, which is enough to simulate a node failure described as follows. +{{< /info >}} + +### Step 2. Check the workload on Grafana dashboard + +1. Open the [Grafana](https://grafana.com) dashboard by accessing [`http://127.0.0.1:3000`](http://127.0.0.1:3000) in your browser. + +2. Log in the dashboard by using the default username `admin` and password `admin`. + +3. Enter the dashboard **playground-tikv-summary**, and the OPS information is in the panel **gRPC message count** in the row **gRPC**. + + {{< figure + src="/img/docs/check-ops.png" + width="80" + number="1" >}} + +4. By default, TiKV replicates all data three times and balances the load across all stores. To see this balancing process, enter the page **playground-overview** and check the Region count across all nodes. In this example, a small amount of data is loaded. 
Thus only one Region is shown: + + {{< figure + src="/img/docs/fault-tolerance-region-count.png" + width="80" + number="1" >}} + +## Experiment 1: Simulate a single-node failure + +### Step 1: Stop the target process + +In TiKV, all read/write operations are handled by the leader of the Region group. See [architecture](https://tikv.org/docs/6.1/reference/architecture/overview/#system-architecture) for details. + +In this example, the only one leader in the cluster is stopped. Then the load continuity and cluster health are checked. + +1. Enter the Grafana dashboard **playground-overview**. The leader distribution is shown in the panel **leader** in row **TiKV**. + +2. In this example, the local process that opens the port `20180` holds only one leader in the cluster. Execute the following command to stop this process. + + ```sh + kill -STOP $(lsof -i:20180 | grep tikv | head -n1 | awk '{print $2}') + ``` + +### Step 2. Check the load continuity and cluster health on Grafana dashboard + +1. Check the leader distribution on the dashboard again. The monitoring metric shows that the leader is moved to another store. + + {{< figure + src="/img/docs/fault-tolerance-leader-recover.png" + width="80" + number="1" >}} + +2. Check the gRPC OPS. The monitoring metric shows that there is a short duration in which the TiKV instance is unavailable because the leader is down. However, the workload is back online as soon as the leader [election](https://raft.github.io/raft.pdf) is completed. + + {{< figure + src="/img/docs/fault-tolerance-ops.png" + width="80" + number="1" >}} + +## Experiment 2: Simulate two simultaneous node failures + +### Step 1: Stop the target processes + +In the above single-node failure simulation, the TiKV cluster has recovered. The leader of the cluster has been stopped, so there are five stores alive. Then, a new leader is elected after a while. + +Experiment 2 will increase the Region replicas of TiKV to five, stop two non-leader nodes simultaneously, and check the cluster status. + +{{< info >}} +The component version should be explicitly specified in the `tiup ctl` command. In the following example, the component version is v6.1.0. +{{< /info >}} + +1. Increase the replicas of the cluster to five: + + ```sh + tiup ctl:v6.1.0 pd config set max-replicas 5 + ``` + +2. Stop two non-leader nodes simultaneously. In this example, the processes that hold the ports `20181` and `20182` are stopped. The process IDs (PIDs) are `1009934` and `109941`. + + ```sh + kill -STOP 1009934 + kill -STOP 1009941 + ``` + +### Step 2: Check the load continuity and cluster health on Grafana dashboard + +1. Similar to [Step 2. Check the load continuity and cluster health on Grafana dashboard](#step-2-check-the-load-continuity-and-cluster-health-on-grafana-dashboard) in the single-node failure simulation, enter the Grafana dashboard and follow **playground-tikv-summary** -> **gRPC** -> **gRPC message count**. The metrics show that the workload continuity is not impacted because the leader is still alive. + + {{< figure + src="/img/docs/fault-tolerance-workload.png" + width="80" + number="1" >}} + +2. To further verify the load continuity and cluster health, `client-py` is used to read and write some data to prove that the cluster is still available. 
+ + ```python + >>> from tikv_client import RawClient + >>> client = RawClient.connect("127.0.0.1:2379") + >>> len(client.scan_keys(None, None, 10240)) + 10000 + >>> client.put(b'key', b'value') + >>> len(client.scan_keys(None, None, 10240)) + 10001 + ``` + +## Clean up the test cluster + +After experiment 2 is finished, you might need to clean up the test cluster. To do that, take the following steps: + +1. Go back to the terminal session that you have just started the TiKV cluster and press ctrl + c and wait for the cluster to stop. +2. After the cluster is stopped, destroy the cluster using the following command: + + ```sh + tiup clean --all + ``` diff --git a/content/docs/7.1/concepts/explore-tikv-features/overview.md b/content/docs/7.1/concepts/explore-tikv-features/overview.md new file mode 100644 index 00000000..88ea8b94 --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/overview.md @@ -0,0 +1,22 @@ +--- +title: Features +description: The features of TiKV +menu: + "7.1": + parent: Get Started-7.1 + weight: 2 + identifier: Features-7.1 +--- + +TiKV offers the following key features: + +| Feature | Description | +| ------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Replication and rebalancing](../replication-and-rebalancing) | With the [Placement Driver](/docs/3.0/concepts/architecture#placement-driver) and carefully designed Raft groups, TiKV excels in horizontal scalability and can easily scale to over 100 terabytes of data. | +| [High fault tolerance and auto-recovery](../fault-tolerance) | TiKV applies the Raft consensus algorithm to replicate data to multiple nodes, thus achieving high fault tolerance. | +| [TTL (Time to Live) on RawKV](../ttl) | RawKV supports TTL to automatically clear expired Key-Value pairs. | +| [CAS (Compare-And-Swap) on RawKV](../cas) | RawKV supports the compare-and-swap operation to achieve synchronization in multi-threading. | +| [Distributed Transaction](../distributed-transaction) | Similar to [Google Spanner](https://ai.google/research/pubs/pub39966), TiKV supports externally consistent distributed transactions. | +| [TiKV API V2](../api-v2) | TiKV API v2 provides new storage format to support more features, such as **Changed Data Capture** and **Key Space**. | +| [RawKV BR](../backup-restore) | RawKV supports backup and restoration. | +| [RawKV CDC](../cdc/cdc) | RawKV supports Changed Data Capture. | \ No newline at end of file diff --git a/content/docs/7.1/concepts/explore-tikv-features/replication-and-rebalancing.md b/content/docs/7.1/concepts/explore-tikv-features/replication-and-rebalancing.md new file mode 100644 index 00000000..b285fe48 --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/replication-and-rebalancing.md @@ -0,0 +1,177 @@ +--- +title: Replication and Rebalancing +description: Learn how TiKV replicates, distributes, and rebalances data. +menu: + "7.1": + parent: Features-7.1 + weight: 1 + identifier: Replication and Rebalancing-7.1 +--- + +This document walks you through a simple demonstration of how TiKV replicates, distributes, and rebalances data. To start a 3-node local cluster, you need to perform the following operations: + +1. Write some data via [go-ycsb](https://github.com/pingcap/go-ycsb), and then verify whether the data is replicated in triplicate by default. +2. 
Add two more nodes and see how TiKV automatically rebalances replicas to efficiently use all available capacity. + +{{< warning >}} +Do not apply this operation in the production environment. +{{< /warning >}} + +## Prerequisites + +Make sure that you have installed [TiUP](https://github.com/pingcap/tiup) as described in [TiKV in 5 Minutes](../../tikv-in-5-minutes). + +## Step 1: Start a 3-node cluster + +1. Check your TiUP version. Execute the following command: + + ```sh + tiup -v + ``` + +2. Depending on the TiUP version, execute the `tiup-playground` command to start a 3-node local cluster. + + If TiUP version is v1.5.2 or later, execute the following command: + + ```sh + tiup playground --mode tikv-slim --kv 3 + ``` + + If TiUP version is earlier than v1.5.2, execute the following command: + + ```sh + tiup playground --kv 3 + ``` + +After you execute the command, the output is as follows: + +``` +Starting component `playground`: /home/pingcap/.tiup/components/playground/v1.5.0/tiup-playground --mode tikv-slim --kv 3 +Using the version v5.0.2 for version constraint "". + +If you'd like to use a TiDB version other than v5.0.2, cancel and retry with the following arguments: + + Specify version manually: tiup playground + Specify version range: tiup playground ^5 + The nightly version: tiup playground nightly + +Playground Bootstrapping... +Start pd instance +Start tikv instance +Start tikv instance +Start tikv instance +PD client endpoints: [127.0.0.1:2379] +To view the Prometheus: http://127.0.0.1:33703 +To view the Grafana: http://127.0.0.1:3000 +``` + +## Step 2: Write data + +On another terminal session, you can use [go-ycsb](https://github.com/pingcap/go-ycsb) to launch a workload. + +1. Clone the `go-ycsb` from GitHub. + + ```sh + git clone https://github.com/pingcap/go-ycsb.git + ``` + +2. Build the application from the source. + + ```sh + make + ``` + +3. Load a small workload using `go-ycsb`. + + ```sh + # By default, this workload will insert 1000 records into TiKV. + ./bin/go-ycsb load tikv -P workloads/workloada -p tikv.pd="127.0.0.1:2379" -p tikv.type="raw" + ``` + +## Step 3: Verify the replication + +To understand the replication in TiKV, it is important to review several concepts in the [architecture](https://github.com/tikv/tikv#tikv-software-stack). + +| Concept | Description | +| ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Region** | TiKV can be taken as a giant sorted map of key-value pairs. The Region is the basic unit of key-value data movement. Each Region is a range of keys and is replicated to multiple Nodes. These multiple replicas form a Raft group. | +| **Peer** | TiKV replicates each Region (three times by default) and stores each replica on a different peer. In the same node, it contains multiple peers of different Regions. | + +1. Open the Grafana at [http://localhost:3000](http://localhost:3000) (printed from the `tiup-playground` command), and then log in to Grafana using username `admin` and password `admin`. + +2. On the **playground-overview** dashboard, check the metrics on the **Region** panel in the **TiKV** tab. You can see that the numbers of Regions on all three nodes are the same, which indicates the following: + + * There is only one Region. It contains the data imported by `go-ycsb`. 
+ * Each Region has 3 replicas (according to the default configuration). + * For each Region, each replica is stored in different stores. + +{{< figure + src="/img/docs/region-count.png" + width="80" + number="1" >}} + +## Step 4: Write more data + +In this section, you can launch a larger workload, scale the 3-node local cluster to a 5-node cluster, and then check whether the load of the TiKV cluster is **rebalanced** as expected. + +1. Start a new terminal session and launch a larger workload with `go-ycsb`. + For example, on a machine with 16 virtual cores, you can launch a workload by executing the following command: + + ```sh + ./bin/go-ycsb load tikv -P workloads/workloada -p tikv.pd="127.0.0.1:2379" -p tikv.type="raw" -p tikv.conncount=16 -p threadcount=16 -p recordcount=1000000 + ``` + +2. Go to the **playground-overview** dashboard of the Grafana, and check the Region distribution on the TiKV cluster. The number of Regions keeps increasing while writing data to the cluster as follows: + +{{< figure + src="/img/docs/region-count-after-load-data.png" + width="80" + number="1" >}} + +## Step 5: Add two more nodes + +1. Start another terminal session and use the `tiup playground` command to scale out the cluster. + + ```sh + tiup playground scale-out --kv 2 + ``` + +2. Verify the scale-out cluster by executing the following command: + + ```sh + tiup playground display + ``` + + The output is as follows: + + ``` + Pid Role Uptime + --- ---- ------ + 282731 pd 4h1m23.792495134s + 282752 tikv 4h1m23.77761744s + 282757 tikv 4h1m23.761628915s + 282761 tikv 4h1m23.748199302s + 308242 tikv 9m50.551877838s + 308243 tikv 9m50.537477856s + ``` + +## Step 6: Verify the data rebalance + +Go to the Grafana page as mentioned above. You can find some Regions are split and rebalanced to the two new nodes. + +{{< figure + src="/img/docs/data-rebalance.png" + width="80" + number="1" >}} + +## Step 7: Stop and delete the cluster + +If you do not need the local TiKV cluster anymore, you can stop and delete it. + +1. To stop the TiKV cluster, get back to the terminal session in which you have started the TiKV cluster. Press Ctrl + C and wait for the cluster to stop. + +2. After the cluster is stopped, to delete the cluster, execute the following command: + + ```sh + tiup clean --all + ``` diff --git a/content/docs/7.1/concepts/explore-tikv-features/ttl.md b/content/docs/7.1/concepts/explore-tikv-features/ttl.md new file mode 100644 index 00000000..2e3a4c14 --- /dev/null +++ b/content/docs/7.1/concepts/explore-tikv-features/ttl.md @@ -0,0 +1,108 @@ +--- +title: TTL on RawKV +description: Time to Live +menu: + "7.1": + parent: Features-7.1 + weight: 3 + identifier: TTL on RawKV-7.1 +--- + +This page walks you through a simple demonstration of how to use TTL (Time To Live) on RawKV. TTL is a data clearing mechanism that automatically deletes data after a specified period of time. For example: + +- If TTL is not used, the data written to TiKV will always exist in TiKV unless it is manually deleted. +- If TTL is used, and the TTL time of a key is set to one hour, the data of the key will be automatically deleted by TiKV after one hour. + +## Prerequisites + +Before you start, ensure that you have installed TiUP and jshell, and have downloaded the `tikv-client` JARS file according to [TiKV in 5 Minutes](../../tikv-in-5-minutes). + +## Step 1: Config TiKV to enable TTL + +TTL is disabled by default. To enable it, create a file `tikv.yaml` using the following configuration. 
+ +```yaml +[storage] +enable-ttl = true +``` + +## Step 2: Start TiKV Cluster + +For this tutorial, only one TiKV node is needed, so the `tiup playground` command is used. + +Show TiUP version: + +```bash +tiup -v +``` + +version >= 1.5.2: + +```bash +tiup playground --mode tikv-slim --kv.config tikv.yaml +``` + +version < 1.5.2: + +```bash +tiup playground --kv.config tikv.yaml +``` + +## Step 3: Write the code to test TTL + +The following example shows how to verify the TTL works. + +Save the following script to file `test_raw_ttl.java`. + +```java +import java.util.*; +import org.tikv.common.TiConfiguration; +import org.tikv.common.TiSession; +import org.tikv.raw.RawKVClient; +import org.tikv.shade.com.google.protobuf.ByteString; + +TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379"); +TiSession session = TiSession.create(conf); +RawKVClient client = session.createRawClient(); + +// write (k1, v1) with ttl=30 seconds +client.put(ByteString.copyFromUtf8("k1"), ByteString.copyFromUtf8("v1"), 30); + +// write (k2, v2) without ttl +client.put(ByteString.copyFromUtf8("k2"), ByteString.copyFromUtf8("v2")); + +// get k1 & k2 resturns v1 & v2 +System.out.println(client.batchGet(new ArrayList() {{ + add(ByteString.copyFromUtf8("k1")); + add(ByteString.copyFromUtf8("k2")); +}})); + +// sleep 30 seconds +System.out.println("Sleep 30 seconds.") +Thread.sleep(30000); + +// get k1 & k2 returns v2 +// k1's ttl is expired +System.out.println(client.batchGet(new ArrayList() {{ + add(ByteString.copyFromUtf8("k1")); + add(ByteString.copyFromUtf8("k2")); +}})); +``` + +## Step 4: Run the code + +```bash +jshell --class-path tikv-client-java.jar:slf4j-api.jar --startup test_raw_ttl.java + +[key: "k1" +value: "v1" +, key: "k2" +value: "v2" +] +Sleep 30 seconds. +[key: "k2" +value: "v2" +] +``` + +After running the above code, you can find that `k1` is automatically deleted when its TTL has expired. diff --git a/content/docs/7.1/concepts/overview.md b/content/docs/7.1/concepts/overview.md new file mode 100644 index 00000000..0e43c22a --- /dev/null +++ b/content/docs/7.1/concepts/overview.md @@ -0,0 +1,39 @@ +--- +title: Get Started +description: TiKV Introduction +menu: + "7.1": + weight: 2 + identifier: Get Started-7.1 +--- + +**TiKV** is a highly scalable, low latency, and easy to use key-value database that delivers performance less than 10 ms at any scale. + +TiKV is intended to fill the role of a unified distributed storage layer. TiKV excels at working with **large-scale data** by supporting petabyte-scale deployments spanning trillions of rows. + +As a graduate project of the [Cloud Native Computing Foundation](https://www.cncf.io/), TiKV is originally created by [PingCAP](https://pingcap.com/en) to complement [TiDB](https://github.com/pingcap/tidb). + +{{< info >}} +The **Ti** in TiKV stands for **titanium**. Titanium has the highest strength-to-density ratio than any other metallic elements and is named after the Titans of Greek mythology. +{{< /info >}} + +## Architecture + +A TiKV cluster consists of the following components: + +- A group of TiKV nodes: store key-value pair data +- A group of Placement Driver (PD) nodes: work as the manager of the TiKV cluster + +TiKV clients let you connect to a TiKV cluster and use raw (simple get/put) API or transaction (with transactional consistency guarantees) API to access and update your data. TiKV clients interact with PD and TiKV through gRPC. 
+ +{{< figure + src="/img/basic-architecture.png" + alt="TiKV architecture diagram" + caption="Architecture of TiKV" + width="70" >}} + +For more information about the architecture, see [Core concepts and architecture behind TiKV](../../reference/architecture/overview/). + +## What's Next + +[TiKV in 5 Minutes](../tikv-in-5-minutes/) is strongly recommended if you want to try TiKV. diff --git a/content/docs/7.1/concepts/tikv-in-5-minutes.md b/content/docs/7.1/concepts/tikv-in-5-minutes.md new file mode 100644 index 00000000..c9e931c1 --- /dev/null +++ b/content/docs/7.1/concepts/tikv-in-5-minutes.md @@ -0,0 +1,284 @@ +--- +title: TiKV in 5 Minutes +description: Get started using TiKV in 5 Minutes +menu: + "7.1": + parent: Get Started-7.1 + weight: 1 + identifier: TiKV in 5 Minutes-7.1 +--- + +This tutorial provides you a quick way to get started with TiKV, including the following operations: + +- [Set up a local TiKV cluster with the default options](#set-up-a-local-tikv-cluster-with-the-default-options) +- [Monitor the TiKV cluster](#monitor-the-cluster) +- [Write data to and read data from the TiKV cluster](#write-data-to-and-read-data-from-the-tikv-cluster) +- [Stop and delete a TiKV cluster](#stop-and-delete-the-tikv-cluster) + +## Prerequisites + +Before you start, ensure that you are using macOS or Linux. + +{{< warning >}} +This quick-start tutorial is only for test environments. For production environments, refer to [Install TiKV](../../deploy/install/install/). +{{< /warning >}} + +## Set up a local TiKV cluster with the default options + +1. Install TiUP by executing the following command: + + ```bash + curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh + ``` + +2. Set the TiUP environment variables: + + 1. Redeclare the global environment variables: + + ```bash + source .bash_profile + ``` + + 2. Confirm whether TiUP is installed: + + ```bash + tiup + ``` + +3. If TiUP is already installed, update the TiUP Playground component to the latest version: + + ```bash + tiup update --self && tiup update playground + ``` + +4. Use TiUP Playground to start a local TiKV cluster. Before you do that, check your TiUP version using the following command: + + ```bash + tiup -v + ``` + + - If your TiUP version is v1.5.2 or later, execute the following command to start a local TiKV cluster: + + ```bash + tiup playground --mode tikv-slim + ``` + + - If your TiUP version is earlier than v1.5.2, execute the following command to start a local TiKV cluster: + + ```bash + tiup playground + ``` + + {{< info >}} +Refer to [TiUP playground document](https://docs.pingcap.com/tidb/stable/tiup-playground) for more TiUP Playground commands. + {{< /info >}} + +## Monitor the TiKV cluster + +After the TiKV cluster is started using TiUP Playground, to monitor the cluster metrics, perform the following steps: + +1. Open your browser, access [http://127.0.0.1:3000](http://127.0.0.1:3000), and then log in to the Grafana Dashboard. + + By default, the username is `admin` and the password is `admin`. + +2. Open the `TiKV-Summary` page on the Grafana Dashboard and find the monitoring metrics of your TiKV cluster. + +## Write data to and read data from the TiKV cluster + +To write to and read from the TiKV cluster, you can use Java, Go, Rust, C, or Python script. + +The following two examples use Java and Python respectively to show how to write "Hello World!" to TiKV. + +### Use Java + +1. 
Download the JAR files using the following commands: + + ```bash + curl -o tikv-client-java.jar https://github.com/tikv/client-java/releases/download/v3.2.0-rc/tikv-client-java-3.2.0-SNAPSHOT.jar -L && \ + curl -o slf4j-api.jar https://repo1.maven.org/maven2/org/slf4j/slf4j-api/1.7.16/slf4j-api-1.7.16.jar + ``` + +2. Install `jshell`. The JDK version should be 9.0 or later. + +3. Try the `RAW KV` API. + + 1. Save the following script to the `test_raw.java` file. + + ```java + import java.util.ArrayList; + import java.util.List; + import java.util.Optional; + import org.tikv.common.TiConfiguration; + import org.tikv.common.TiSession; + import org.tikv.kvproto.Kvrpcpb; + import org.tikv.raw.RawKVClient; + import org.tikv.shade.com.google.protobuf.ByteString; + + TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379"); + TiSession session = TiSession.create(conf); + RawKVClient client = session.createRawClient(); + + // put + client.put(ByteString.copyFromUtf8("k1"), ByteString.copyFromUtf8("Hello")); + client.put(ByteString.copyFromUtf8("k2"), ByteString.copyFromUtf8(",")); + client.put(ByteString.copyFromUtf8("k3"), ByteString.copyFromUtf8("World")); + client.put(ByteString.copyFromUtf8("k4"), ByteString.copyFromUtf8("!")); + client.put(ByteString.copyFromUtf8("k5"), ByteString.copyFromUtf8("Raw KV")); + + // get + Optional result = client.get(ByteString.copyFromUtf8("k1")); + System.out.println(result.get().toStringUtf8()); + + // batch get + List list =client.batchGet(new ArrayList() {{ + add(ByteString.copyFromUtf8("k1")); + add(ByteString.copyFromUtf8("k3")); + }}); + System.out.println(list); + + // scan + list = client.scan(ByteString.copyFromUtf8("k1"), ByteString.copyFromUtf8("k6"), 10); + System.out.println(list); + + // close + client.close(); + session.close(); + ``` + + 2. Run the `test_raw.java` script to write "Hello World!" to TiKV: + + ```bash + jshell --class-path tikv-client-java.jar:slf4j-api.jar --startup test_raw.java + ``` + + The output is as follows: + + ```bash + Hello + [key: "k1" + value: "Hello" + , key: "k3" + value: "World" + ] + [key: "k1" + value: "Hello" + , key: "k2" + value: "," + , key: "k3" + value: "World" + , key: "k4" + value: "!" + , key: "k5" + value: "Raw KV" + ] + ``` + +### Use Python + +1. Install the `tikv-client` python package. + + ```bash + pip3 install -i https://test.pypi.org/simple/ tikv-client + ``` + + {{< info >}} +This package requires Python 3.5+. + {{< /info >}} + +2. Use either the `RAW KV` API or `TXN KV` API to write data to TiKV. + + - Try the `RAW KV` API. + + 1. Save the following Python script to the `test_raw.py` file. + + ```python + from tikv_client import RawClient + + client = RawClient.connect("127.0.0.1:2379") + + # put + client.put(b"k1", b"Hello") + client.put(b"k2", b",") + client.put(b"k3", b"World") + client.put(b"k4", b"!") + client.put(b"k5", b"Raw KV") + + # get + print(client.get(b"k1")) + + # batch get + print(client.batch_get([b"k1", b"k3"])) + + # scan + print(client.scan(b"k1", end=b"k5", limit=10, include_start=True, include_end=True)) + ``` + + 2. Run the `test_raw.py` script to write "Hello World!" to TiKV: + + ```bash + python3 test_raw.py + ``` + + The output is as follows: + + ```bash + b'Hello' + [(b'k3', b'World'), (b'k1', b'Hello')] + [(b'k1', b'Hello'), (b'k2', b','), (b'k3', b'World'), (b'k4', b'!'), (b'k5', b'Raw KV')] + ``` + + - Try the `TXN KV` API. + + 1. Save the following Python script to the `test_txn.py` file. 
+ + ```python + from tikv_client import TransactionClient + + client = TransactionClient.connect("127.0.0.1:2379") + + # put + txn = client.begin() + txn.put(b"k1", b"Hello") + txn.put(b"k2", b",") + txn.put(b"k3", b"World") + txn.put(b"k4", b"!") + txn.put(b"k5", b"TXN KV") + txn.commit() + + snapshot = client.snapshot(client.current_timestamp()) + + # get + print(snapshot.get(b"k1")) + + # batch get + print(snapshot.batch_get([b"k1", b"k3"])) + + # scan + print(snapshot.scan(b"k1", end=b"k5", limit=10, include_start=True, include_end=True)) + ``` + + 2. Run the `test_txn.py` script to write "Hello World!" to TiKV: + + ```bash + python3 test_txn.py + ``` + + The output is as follows: + + ```bash + b'Hello' + [(b'k3', b'World'), (b'k1', b'Hello')] + [(b'k1', b'Hello'), (b'k2', b','), (b'k3', b'World'), (b'k4', b'!'), (b'k5', b'TXN KV')] + ``` + +## Stop and delete the TiKV cluster + +If you do not need the local TiKV cluster anymore, you can stop and delete it. + +1. To stop the TiKV cluster, get back to the terminal session in which you have started the TiKV cluster. Press Ctrl + C and wait for the cluster to stop. + +2. After the cluster is stopped, to delete the cluster, execute the following command: + + ```sh + tiup clean --all + ``` diff --git a/content/docs/7.1/concepts/whats-next.md b/content/docs/7.1/concepts/whats-next.md new file mode 100644 index 00000000..75c24f7a --- /dev/null +++ b/content/docs/7.1/concepts/whats-next.md @@ -0,0 +1,31 @@ +--- +title: What's Next +description: +menu: + "7.1": + parent: Get Started-7.1 + weight: 3 + identifier: What's Next-7.1 +--- + +Depending on your role, the following resources are recommended to learn more about TiKV: + +- As a TiKV cluster administrator, you can refer to the following administration instructions: + + - Install a TiKV cluster: [Install TiKV](../../deploy/install/install/) + - Config a TiKV cluster: [Configure TiKV](../../deploy/configure/introduction/) + - Benchmark of a TiKV cluster: [Benchmark and Performance](../../deploy/benchmark/benchmark/) + - Monitor a TiKV cluster: [Monitor and Alert](../../deploy/monitor/monitor/) + - Maintain and operate a TiKV cluster: [Operate TiKV Cluster](../../deploy/operate/operate/) + - Administrate TiKV Cluster with CLI Tools: [CLI](../../reference/cli/introduction/) + +- As an application developer, you can refer to the following development instructions: + + - Interact with a TiKV cluster using TiKV clients: [TiKV clients](../../develop/clients/introduction/) + - Interact with TiKV using RawKV API: [RawKV](../../develop/rawkv/introduction/) + - Learn the architecture of TiKV: [Architecture](../../reference/architecture/introduction/) + +- If you want to become a TiKV contributor, you can refer to the following contribution instructions: + + - How to be a TiKV Contributor: [Contribute](/community/contribute/) + - The internal of TiKV: [Deep Dive](/deep-dive/introduction/) diff --git a/content/docs/7.1/deploy/configure/coprocessor.md b/content/docs/7.1/deploy/configure/coprocessor.md new file mode 100644 index 00000000..7497ddd7 --- /dev/null +++ b/content/docs/7.1/deploy/configure/coprocessor.md @@ -0,0 +1,11 @@ +--- +title: Coprocessor Config +description: Learn how to configure Coprocessor in TiKV. +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 14 + identifier: Coprocessor Config-7.1 +--- + +You can find all the configuration parameters related to Coprocessor [here](../tikv-configuration-file/#coprocessor). 
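+
+For example, the thresholds that control when Regions are split live under the `[coprocessor]` section of the TiKV configuration file. A minimal sketch, assuming the configuration file is named `tikv.toml` and using illustrative values (see the linked reference for the authoritative parameter list and defaults):
+
+```bash
+# Append an illustrative [coprocessor] section to the TiKV configuration file
+cat >> tikv.toml <<'EOF'
+[coprocessor]
+region-max-size = "144MB"
+region-split-size = "96MB"
+EOF
+```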
diff --git a/content/docs/7.1/deploy/configure/grpc.md b/content/docs/7.1/deploy/configure/grpc.md
new file mode 100644
index 00000000..2a46f69c
--- /dev/null
+++ b/content/docs/7.1/deploy/configure/grpc.md
@@ -0,0 +1,11 @@
+---
+title: gRPC Config
+description: Learn how to configure gRPC in TiKV.
+menu:
+  "7.1":
+    parent: Configure TiKV-7.1
+    weight: 13
+    identifier: gRPC Config-7.1
+---
+
+You can find all the configuration parameters related to gRPC [here](../tikv-configuration-file/#server) (starting with `grpc-`).
diff --git a/content/docs/7.1/deploy/configure/introduction.md b/content/docs/7.1/deploy/configure/introduction.md
new file mode 100644
index 00000000..da7f38ff
--- /dev/null
+++ b/content/docs/7.1/deploy/configure/introduction.md
@@ -0,0 +1,33 @@
+---
+title: Configure TiKV
+description: Configure a wide range of TiKV facets, including RocksDB, gRPC, the Placement Driver, and more.
+menu:
+  "7.1":
+    parent: Deploy-7.1
+    weight: 2
+    identifier: Configure TiKV-7.1
+---
+
+Although it is recommended to get started with TiKV using the default configuration, TiKV provides many configuration parameters to tweak its behavior, which allows you to configure the cluster to suit your specific application requirements.
+
+The following documents guide you on how to configure different TiKV components:
+
+PD
+
+- [PD Command Line Parameters](../pd-command-line): Learn configuration flags of PD.
+- [PD Config](../pd-configuration-file): Learn the PD configuration file.
+
+TiKV
+
+- [TiKV Command Line Parameters](../tikv-command-line): Learn configuration flags of TiKV.
+- [TiKV Config](../tikv-configuration-file): Learn the TiKV configuration file.
+- [Security](../security): Use TLS security and review security procedures.
+- [Topology Label](../topology): Use location awareness to improve resiliency and performance.
+- [Limit](../limit): Learn how to configure scheduling rate limit on stores.
+- [Region Merge](../region-merge): Tweak Region merging.
+- [RocksDB](../rocksdb): Tweak RocksDB configuration parameters.
+- [Raftstore](../raftstore): Learn how to configure Raftstore in TiKV.
+- [Titan](../titan): Enable Titan to improve performance with large values.
+- [Storage](../storage): Learn how to configure storage in TiKV.
+- [gRPC](../grpc): Learn how to configure gRPC in TiKV.
+- [Coprocessor](../coprocessor): Learn how to configure Coprocessor in TiKV.
diff --git a/content/docs/7.1/deploy/configure/limit.md b/content/docs/7.1/deploy/configure/limit.md
new file mode 100644
index 00000000..f65d7c86
--- /dev/null
+++ b/content/docs/7.1/deploy/configure/limit.md
@@ -0,0 +1,77 @@
+---
+title: Scheduling Limit Config
+description: Learn how to configure scheduling rate limit on stores.
+menu:
+  "7.1":
+    parent: Configure TiKV-7.1
+    weight: 7
+    identifier: Scheduling Limit Config-7.1
+---
+
+This document describes how to configure the scheduling rate limit at the store level.
+
+TiKV balances Regions according to the commands sent by PD. These commands are called scheduling operators. PD creates scheduling operators based on the information gathered from TiKV and the scheduling configurations.
+
+`*-schedule-limit` in `pd-ctl` is usually used to set limits on the total number of various operators. However, `*-schedule-limit` might cause performance bottlenecks, because it applies to the entire cluster.
+
+## Configure scheduling rate limits on stores
+
+PD provides two methods to configure scheduling rate limits on stores as follows:
+
+1.
Permanently set the scheduling rate limits by [store-balance-rate](../pd-configuration-file/#store-balance-rate) in `pd-ctl`. + + {{< info >}} +The configuration change only applies to the stores that are started afterward. If you want to apply this change to all stores, you need to restart all TiKV stores. If you want to apply this change immediately, see the [workaround](#workaround) below. + {{< /info >}} + + `store-balance-rate` specifies the maximum number of scheduling tasks allowed for each store per minute. The scheduling step includes adding peers or learners. + + ```bash + » config set store-balance-rate 20 + ``` + +2. Temporarily set the scheduling rate limits by `limit` in `pd-ctl`. + + {{< info >}} +The scheduling rate limit set by this method is lost after restarting TiKV, and the value previously set by method 1 is used instead. + {{< /info >}} + + - **`stores set limit `** + + ```bash + # Set the maximum number of scheduling operators per minute to be 20. Apply to all stores. + » stores set limit 20 + ``` + + - **`store limit `** + + ```bash + # Set the maximum number of scheduling operators per minute to be 20. Apply to store 2. + » store limit 2 10 + ``` + +## Read current scheduling rate limits on stores + + - **`store show limit`** + + ```bash + » stores show limit + { + "4": { + "rate": 15 + }, + "5": { + "rate": 15 + }, + # ... + } + ``` + +## Workaround + +By combining method 1 and method 2, you can permanently modify the rate limit to 20 and applies it immediately. + + ```bash + » config set store-balance-rate 20 + » stores set limit 20 + ``` diff --git a/content/docs/7.1/deploy/configure/pd-command-line.md b/content/docs/7.1/deploy/configure/pd-command-line.md new file mode 100644 index 00000000..62b8f21e --- /dev/null +++ b/content/docs/7.1/deploy/configure/pd-command-line.md @@ -0,0 +1,109 @@ +--- +title: PD Command Line Parameters +description: Learn configuration flags of PD +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 1 + identifier: PD Command Line Parameters-7.1 +--- + +PD is configurable using command-line flags and environment variables. + +## `--advertise-client-urls` + +- The list of advertise URLs for the client to access PD +- Default: `"${client-urls}"` +- In some situations such as in the Docker or NAT network environment, if a client cannot access PD through the default client URLs listened to by PD, you must manually set the advertise client URLs. +- For example, the internal IP address of Docker is `172.17.0.1`, while the IP address of the host is `192.168.100.113` and the port mapping is set to `-p 2379:2379`. In this case, you can set `--advertise-client-urls` to `"http://192.168.100.113:2379"`. The client can find this service through `"http://192.168.100.113:2379"`. + +## `--advertise-peer-urls` + +- The list of advertise URLs for other PD nodes (peers) to access a PD node +- Default: `"${peer-urls}"` +- In some situations such as in the Docker or NAT network environment, if the other nodes (peers) cannot access the PD node through the default peer URLs listened to by this PD node, you must manually set the advertise peer URLs. +- For example, the internal IP address of Docker is `172.17.0.1`, while the IP address of the host is `192.168.100.113` and the port mapping is set to `-p 2380:2380`. In this case, you can set `--advertise-peer-urls` to `"http://192.168.100.113:2380"`. The other PD nodes can find this service through `"http://192.168.100.113:2380"`. 
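+
+For the Docker scenario described above, the flags might be combined as in the following sketch. The host IP and port mappings come from the example; the image tag and container name are placeholders:
+
+```bash
+docker run -d --name pd -p 2379:2379 -p 2380:2380 pingcap/pd:latest \
+    --name="pd" \
+    --client-urls="http://0.0.0.0:2379" \
+    --advertise-client-urls="http://192.168.100.113:2379" \
+    --peer-urls="http://0.0.0.0:2380" \
+    --advertise-peer-urls="http://192.168.100.113:2380"
+```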
+ +## `--client-urls` + +- The list of client URLs to be listened to by PD +- Default: `"http://127.0.0.1:2379"` +- When you deploy a cluster, you must specify the IP address of the current host as `--client-urls` (for example, `"http://192.168.100.113:2379"`). If the cluster runs on Docker, specify the IP address of Docker as `"http://0.0.0.0:2379"`. + +## `--peer-urls` + +- The list of peer URLs to be listened to by a PD node +- Default: `"http://127.0.0.1:2380"` +- When you deploy a cluster, you must specify `--peer-urls` as the IP address of the current host, such as `"http://192.168.100.113:2380"`. If the cluster runs on Docker, specify the IP address of Docker as `"http://0.0.0.0:2380"`. + +## `--config` + +- The configuration file +- Default: "" +- If you set the configuration using the command line, the same setting in the configuration file will be overwritten. + +## `--data-dir` + +- The path to the data directory +- Default: "default.${name}" + +## `--initial-cluster` + +- The initial cluster configuration for bootstrapping +- Default: `"{name}=http://{advertise-peer-url}"` +- For example, if `name` is "pd", and `advertise-peer-urls` is `"http://192.168.100.113:2380"`, the `initial-cluster` is `"pd=http://192.168.100.113:2380"`. +- If you need to start three PD servers, the `initial-cluster` might be: + + ``` + pd1=http://192.168.100.113:2380, pd2=http://192.168.100.114:2380, pd3=192.168.100.115:2380 + ``` + +## `--join` + +- Join the cluster dynamically +- Default: "" +- If you want to join an existing cluster, you can use `--join="${advertise-client-urls}"`, the `advertise-client-url` is any existing PD's, multiply advertise client urls are separated by comma. + +## `-L` + +- The log level +- Default: "info" +- You can choose from debug, info, warn, error, or fatal. + +## `--log-file` + +- The log file +- Default: "" +- If this flag is not set, logs will be written to stderr. Otherwise, logs will be stored in the log file which will be automatically rotated every day. + +## `--log-rotate` + +- To enable or disable log rotation +- Default: true +- When the value is true, follow the `[log.file]` in PD configuration files. + +## `--name` + +- The human-readable unique name for this PD member +- Default: "pd" +- If you want to start multiply PDs, you must use different name for each one. + +## `--cacert` + +- The file path of CA, used to enable TLS +- Default: "" + +## `--cert` + +- The path of the PEM file including the X509 certificate, used to enable TLS +- Default: "" + +## `--key` + +- The path of the PEM file including the X509 key, used to enable TLS +- Default: "" + +## `--metrics-addr` + +- The address of Prometheus Pushgateway, which does not push data to Prometheus by default. +- Default: "" diff --git a/content/docs/7.1/deploy/configure/pd-configuration-file.md b/content/docs/7.1/deploy/configure/pd-configuration-file.md new file mode 100644 index 00000000..84c6be2c --- /dev/null +++ b/content/docs/7.1/deploy/configure/pd-configuration-file.md @@ -0,0 +1,385 @@ +--- +title: PD Config +description: Learn the PD configuration file +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 2 + identifier: PD Config-7.1 +--- + +The PD configuration file supports more options than command-line parameters. You can find the default configuration file [here](https://github.com/pingcap/pd/blob/release-5.0/conf/config.toml). + +This document only describes parameters that are not included in command-line parameters. Check [here](../pd-command-line) for the command line parameters. 
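+
+As a quick orientation, the configuration file is a TOML file passed to PD at startup. A minimal sketch, assuming the file is named `pd.toml` (the values shown simply restate the defaults described below):
+
+```bash
+cat > pd.toml <<'EOF'
+name = "pd"
+data-dir = "default.pd"
+client-urls = "http://127.0.0.1:2379"
+peer-urls = "http://127.0.0.1:2380"
+EOF
+
+pd-server --config pd.toml
+```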
+
+### `name`
+
+- The unique name of a PD node
+- Default value: `"pd"`
+- To start multiple PD nodes, use a unique name for each node.
+
+### `data-dir`
+
+- The directory in which PD stores data
+- Default value: `"default.${name}"`
+
+### `client-urls`
+
+- The list of client URLs to be listened to by PD
+- Default value: `"http://127.0.0.1:2379"`
+- When you deploy a cluster, you must specify the IP address of the current host as `client-urls` (for example, `"http://192.168.100.113:2379"`). If the cluster runs on Docker, specify the IP address of Docker as `"http://0.0.0.0:2379"`.
+
+### `advertise-client-urls`
+
+- The list of advertise URLs for the client to access PD
+- Default value: `"${client-urls}"`
+- In some situations such as in the Docker or NAT network environment, if a client cannot access PD through the default client URLs listened to by PD, you must manually set the advertise client URLs.
+- For example, the internal IP address of Docker is `172.17.0.1`, while the IP address of the host is `192.168.100.113` and the port mapping is set to `-p 2379:2379`. In this case, you can set `advertise-client-urls` to `"http://192.168.100.113:2379"`. The client can find this service through `"http://192.168.100.113:2379"`.
+
+### `peer-urls`
+
+- The list of peer URLs to be listened to by a PD node
+- Default value: `"http://127.0.0.1:2380"`
+- When you deploy a cluster, you must specify `peer-urls` as the IP address of the current host, such as `"http://192.168.100.113:2380"`. If the cluster runs on Docker, specify the IP address of Docker as `"http://0.0.0.0:2380"`.
+
+### `advertise-peer-urls`
+
+- The list of advertise URLs for other PD nodes (peers) to access a PD node
+- Default: `"${peer-urls}"`
+- In some situations such as in the Docker or NAT network environment, if the other nodes (peers) cannot access the PD node through the default peer URLs listened to by this PD node, you must manually set the advertise peer URLs.
+- For example, the internal IP address of Docker is `172.17.0.1`, while the IP address of the host is `192.168.100.113` and the port mapping is set to `-p 2380:2380`. In this case, you can set `advertise-peer-urls` to `"http://192.168.100.113:2380"`. The other PD nodes can find this service through `"http://192.168.100.113:2380"`.
+
+### `initial-cluster`
+
+- The initial cluster configuration for bootstrapping
+- Default value: `"{name}=http://{advertise-peer-url}"`
+- For example, if `name` is "pd", and `advertise-peer-urls` is `"http://192.168.100.113:2380"`, the `initial-cluster` is `"pd=http://192.168.100.113:2380"`.
+- If you need to start three PD servers, the `initial-cluster` might be:
+
+    ```
+    pd1=http://192.168.100.113:2380,pd2=http://192.168.100.114:2380,pd3=http://192.168.100.115:2380
+    ```
+
+### `initial-cluster-state`
+
++ The initial state of the cluster
++ Default value: `"new"`
+
+### `initial-cluster-token`
+
++ Identifies different clusters during the bootstrap phase.
++ Default value: `"pd-cluster"`
++ If multiple clusters that have nodes with the same configurations are deployed successively, you must specify different tokens to isolate different cluster nodes.
+
+### `lease`
+
++ The timeout of the PD Leader Key lease. After the timeout, the system re-elects a Leader.
++ Default value: `3` ++ Unit: second + +### `tso-save-interval` + ++ The interval for PD to allocate TSOs for persistent storage in etcd ++ Default value: `3` ++ Unit: second + +### `enable-prevote` + ++ Enables or disables `raft prevote` ++ Default value: `true` + +### `quota-backend-bytes` + ++ The storage size of the meta-information database, which is 8GiB by default ++ Default value: `8589934592` + +### `auto-compaction-mod` + ++ The automatic compaction modes of the meta-information database ++ Available options: `periodic` (by cycle) and `revision` (by version number). ++ Default value: `periodic` + +### `auto-compaction-retention` + ++ The time interval for automatic compaction of the meta-information database when `auto-compaction-retention` is `periodic`. When the compaction mode is set to `revision`, this parameter indicates the version number for the automatic compaction. ++ Default value: 1h + +### `force-new-cluster` + ++ Determines whether to force PD to start as a new cluster and modify the number of Raft members to `1` ++ Default value: `false` + +### `tick-interval` + ++ The tick period of etcd Raft ++ Default value: `100ms` + +### `election-interval` + ++ The timeout for the etcd leader election ++ Default value: `3s` + +### `use-region-storage` + ++ Enables or disables independent Region storage ++ Default value: `false` + +## security + +Configuration items related to security + +### `cacert-path` + ++ The path of the CA file ++ Default value: "" + +### `cert-path` + ++ The path of the Privacy Enhanced Mail (PEM) file that contains the X509 certificate ++ Default value: "" + +### `key-path` + ++ The path of the PEM file that contains the X509 key ++ Default value: "" + +### `redact-info-log` + ++ Controls whether to enable log redaction in the PD log. ++ When you set the configuration value to `true`, user data is redacted in the PD log. ++ Default value: `false` + +## log + +Configuration items related to log + +### `format` + ++ The log format, which can be specified as "text", "json", or "console" ++ Default value: `text` + +### `disable-timestamp` + ++ Whether to disable the automatically generated timestamp in the log ++ Default value: `false` + +## log.file + +Configuration items related to the log file + +### `max-size` + ++ The maximum size of a single log file. When this value is exceeded, the system automatically splits the log into several files. ++ Default value: `300` ++ Unit: MiB ++ Minimum value: `1` + +### `max-days` + ++ The maximum number of days in which a log is kept ++ Default value: `28` ++ Minimum value: `1` + +### `max-backups` + ++ The maximum number of log files to keep ++ Default value: `7` ++ Minimum value: `1` + +## metric + +Configuration items related to monitoring + +### `interval` + ++ The interval at which monitoring metric data is pushed to Prometheus ++ Default value: `15s` + +## schedule + +Configuration items related to scheduling + +### `max-merge-region-size` + ++ Controls the size limit of `Region Merge`. When the Region size is greater than the specified value, PD does not merge the Region with the adjacent Regions. ++ Default value: `20` + +### `max-merge-region-keys` + ++ Specifies the upper limit of the `Region Merge` key. When the Region key is greater than the specified value, the PD does not merge the Region with its adjacent Regions. ++ Default value: `200000` + +### `patrol-region-interval` + ++ Controls the running frequency at which `replicaChecker` checks the health state of a Region. 
The smaller this value is, the faster `replicaChecker` runs. Normally, you do not need to adjust this parameter. ++ Default value: `100ms` + +### `split-merge-interval` + ++ Controls the time interval between the `split` and `merge` operations on the same Region. That means a newly split Region will not be merged for a while. ++ Default value: `1h` + +### `max-snapshot-count` + ++ Control the maximum number of snapshots that a single store receives or sends at the same time. PD schedulers depend on this configuration to prevent the resources used for normal traffic from being preempted. ++ Default value value: `3` + +### `max-pending-peer-count` + ++ Controls the maximum number of pending peers in a single store. PD schedulers depend on this configuration to prevent too many Regions with outdated logs from being generated on some nodes. ++ Default value: `16` + +### `max-store-down-time` + ++ The downtime after which PD judges that the disconnected store can not be recovered. When PD fails to receive the heartbeat from a store after the specified period of time, it adds replicas at other nodes. ++ Default value: `30m` + +### `leader-schedule-limit` + ++ The number of Leader scheduling tasks performed at the same time ++ Default value: `4` + +### `region-schedule-limit` + ++ The number of Region scheduling tasks performed at the same time ++ Default value: `2048` + +### `hot-region-schedule-limit` + ++ Controls the hot Region scheduling tasks that are running at the same time. It is independent of the Region scheduling. ++ Default value: `4` + +### `hot-region-cache-hits-threshold` + ++ The threshold used to set the number of minutes required to identify a hot Region. PD can participate in the hotspot scheduling only after the Region is in the hotspot state for more than this number of minutes. ++ Default value: `3` + +### `replica-schedule-limit` + ++ The number of Replica scheduling tasks performed at the same time ++ Default value: `64` + +### `merge-schedule-limit` + ++ The number of the `Region Merge` scheduling tasks performed at the same time. Set this parameter to `0` to disable `Region Merge`. ++ Default value: `8` + +### `high-space-ratio` + ++ The threshold ratio below which the capacity of the store is sufficient. If the space occupancy ratio of the store is smaller than this threshold value, PD ignores the remaining space of the store when performing scheduling, and balances load mainly based on the Region size. This configuration takes effect only when `region-score-formula-version` is set to `v1`. ++ Default value: `0.7` ++ Minimum value: greater than `0` ++ Maximum value: less than `1` + +### `low-space-ratio` + ++ The threshold ratio above which the capacity of the store is insufficient. If the space occupancy ratio of a store exceeds this threshold value, PD avoids migrating data to this store as much as possible. Meanwhile, to avoid the disk space of the corresponding store being exhausted, PD performs scheduling mainly based on the remaining space of the store. 
++ Default value: `0.8` ++ Minimum value: greater than `0` ++ Maximum value: less than `1` + +### `tolerant-size-ratio` + ++ Controls the `balance` buffer size ++ Default value: `0` (automatically adjusts the buffer size) ++ Minimum value: `0` + +### `enable-cross-table-merge` + ++ Determines whether to enable the merging of cross-table Regions ++ Default value: `true` + +### `region-score-formula-version` + ++ Controls the version of the Region score formula ++ Default value: `v2` ++ Optional values: `v1` and `v2` + +### `disable-remove-down-replica` + ++ Determines whether to disable the feature that automatically removes `DownReplica`. When this parameter is set to `true`, PD does not automatically clean up the copy in the down state. ++ Default value: `false` + +### `disable-replace-offline-replica` + ++ Determines whether to disable the feature that migrates `OfflineReplica`. When this parameter is set to `true`, PD does not migrate the replicas in the offline state. ++ Default value: `false` + +### `disable-make-up-replica` + ++ Determines whether to disable the feature that automatically supplements replicas. When this parameter is set to `true`, PD does not supplement replicas for the Region with insufficient replicas. ++ Default value: `false` + +### `disable-remove-extra-replica` + ++ Determines whether to disable the feature that removes extra replicas. When this parameter is set to `true`, PD does not remove the extra replicas from the Region with excessive replicas. ++ Default value: `false` + +### `disable-location-replacement` + ++ Determines whether to disable isolation level check. When this parameter is set to `true`, PD does not increase the isolation level of the Region replicas through scheduling. ++ Default value: `false` + +### `store-balance-rate` + ++ Determines the maximum number of operations related to adding peers within a minute ++ Type: Integer ++ Default value: `15` ++ Minimum value: `0` ++ Maximum value: `200` + +### `enable-joint-consensus` + ++ Controls whether to use Joint Consensus for replica scheduling. If this configuration is disabled, PD schedules one replica at a time. ++ Default value: `true` + +## replication + +Configuration items related to replicas + +### `max-replicas` + ++ The number of replicas ++ Default value: `3` + +### `location-labels` + ++ The topology information of a TiKV cluster ++ Default value: `[]` ++ [Cluster topology configuration](/schedule-replicas-by-topology-labels.md) + +### `isolation-level` + ++ The minimum topological isolation level of a TiKV cluster ++ Default value: `""` ++ [Cluster topology configuration](/schedule-replicas-by-topology-labels.md) + +### `strictly-match-label` + ++ Enables the strict check for whether the TiKV label matches PD's `location-labels`. ++ Default value: `false` + +### `enable-placement-rules` + ++ Enables `placement-rules`. ++ Default value: `false` ++ See [Placement Rules](https://docs.pingcap.com/tidb/stable/configure-placement-rules). ++ An experimental feature of TiKV 4.0. + +## label-property + +Configuration items related to labels + +### `key` + ++ The label key for the store that rejected the Leader ++ Default value: `""` + +### `value` + ++ The label value for the store that rejected the Leader ++ Default value: `""` + +## replication-mode + +Configuration items related to the replication mode of all Regions. 
See [Enable synchronous replication in PD configuration file](https://docs.pingcap.com/tidb/stable/synchronous-replication#enable-synchronous-replication-in-the-pd-configuration-file) for details. diff --git a/content/docs/7.1/deploy/configure/raftstore.md b/content/docs/7.1/deploy/configure/raftstore.md new file mode 100644 index 00000000..abe3a373 --- /dev/null +++ b/content/docs/7.1/deploy/configure/raftstore.md @@ -0,0 +1,11 @@ +--- +title: Raftstore Config +description: Learn how to configure Raftstore in TiKV. +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 10 + identifier: Raftstore Config-7.1 +--- + +You can find all the configuration parameters related to Raftstore [here](../tikv-configuration-file/#raftstore). diff --git a/content/docs/7.1/deploy/configure/region-merge.md b/content/docs/7.1/deploy/configure/region-merge.md new file mode 100644 index 00000000..d96d0ea1 --- /dev/null +++ b/content/docs/7.1/deploy/configure/region-merge.md @@ -0,0 +1,38 @@ +--- +title: Region Merge Config +description: Learn how to configure Region Merge in TiKV. +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 7 + identifier: Region Merge Config-7.1 +--- + +TiKV shards continuous ranges of keys into Regions, and replicates Regions through the Raft protocol. When data size increases until reaching a threshold, a Region splits into multiple. Conversely, if the size of the Region shrinks due to data deletion, two adjacent Regions can be merged into one. + +## Region Merge + +The Region Merge process is initiated by PD as follows: + +1. PD polls the status of the Regions by the interval. + +2. PD ensures all replicas of the two Regions to be merged must be stored on the same set of TiKV(s). + +3. If the sizes of two adjacent regions are both less than `max-merge-region-size` and the numbers of keys within the regions are both less than `max-merge-region-keys`, PD starts the Region Merge process that merges the bigger region into the smaller region. + +## Configure Region Merge + +You can use `pd-ctl` or the PD configuration file to configure Region Merge. + +The Region Merge feature is enabled by default. To disable Region Merge, you need to set the following parameters to zero: + +- `max-merge-region-size` +- `max-merge-region-keys` +- `merge-schedule-limit` + +{{< info >}} +- Newly split Regions are not merged within the period specified by `split-merge-interval`. +- Region Merge does not happen within the period specified by `split-merge-interval` after PD starts or restarts. +{{< /info >}} + +For more information of other configuration parameters about scheduling, see [Scheduling-related parameters](../pd-configuration-file/#schedule). diff --git a/content/docs/7.1/deploy/configure/rocksdb.md b/content/docs/7.1/deploy/configure/rocksdb.md new file mode 100644 index 00000000..5e9a4e99 --- /dev/null +++ b/content/docs/7.1/deploy/configure/rocksdb.md @@ -0,0 +1,33 @@ +--- +title: RocksDB Config +description: Learn how to configure RocksDB engine in TiKV. +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 8 + identifier: RocksDB Config-7.1 +--- + +TiKV uses [RocksDB](https://rocksdb.org/) internally to store Raft logs and key-value pairs. + +TiKV creates two RocksDB instances on each Node: + +* One `rocksdb` instance that stores key-value data. +* One `raftdb` instance that stores Raft logs and has a single column family called `raftdb.defaultcf`. 
+ +The `rocksdb` instance has three column families: + +Column family | Purpose +:-------------|:------- +`rocksdb.defaultcf` | Stores actual KV pairs for TiKV +`rocksdb.lockcf` | Stores transaction lock +`rocksdb.writecf` | Stores transactions' commits and rollback records + +RocksDB can be configured on each column family. Here is an example: + +```toml +[rocksdb.writecf] +whole-key-filtering = false +``` + +For more information about the RocksDB configuration parameters, see [RocksDB-related parameters](../tikv-configuration-file/#rocksdb). diff --git a/content/docs/7.1/deploy/configure/security.md b/content/docs/7.1/deploy/configure/security.md new file mode 100644 index 00000000..fa02ae01 --- /dev/null +++ b/content/docs/7.1/deploy/configure/security.md @@ -0,0 +1,168 @@ +--- +title: Security Config +description: Keep your TiKV secure +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 5 + identifier: Security Config-7.1 +--- + +This document describes how to use Transport Layer Security (TLS) to encrypt the connections between TiKV nodes. + +## Transport Layer Security + +Transport Layer Security is a standard protocol designed to protect network communications from network tampering or inspection. TiKV uses OpenSSL, an industry-standard toolkit for TLS, to implement its TLS encryption. + +It is necessary to use TLS when TiKV is being deployed or accessed from outside of a secure Virtual Local Area Network (VLAN), such as the network across a Wide Area Network (WAN, also refers to a public internet), the network that is a part of an untrusted data center network, and the network where other untrustworthy users or services are active. + +## Preparation + +Before getting started, you need to check your infrastructure. Your organization might already use tools like the [Kubernetes certificates API](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/) to issue certificates. To successfully encrypt the connections between TiKV nodes, prepare the following certificates and keys: + +- A **Certificate Authority** (CA) certificate +- Individual unique **certificates** and **keys** for each TiKV service and PD service +- One or many **certificates** and **keys** for TiKV clients depending on your needs. + + If you already have them, you can skip the [optional section](#optional-generate-a-test-certificate-chain) below. + +If your organization does not yet have a public key infrastructure (PKI), you can create a simple CA to issue certificates for the services in your deployment by following the below instructions: + +### Optional: Generate a test certificate chain + +You need to prepare certificates for each TiKV and Placement Driver (PD) node to be involved with the cluster. It is recommended to prepare a separate server certificate for TiKV and PD and ensure that they can authenticate each other. The clients of TiKV and PD can share one client certificate. + +You can use multiple tools to generate self-signed certificates, such as `openssl`, `easy-rsa`, and `cfssl`. + +Here is an example of generating self-signed certificates using [`easyrsa`](https://github.com/OpenVPN/easy-rsa/): + +```bash +#! 
/bin/bash +set +e + +mkdir -p easyrsa +cd easyrsa +curl -L https://github.com/OpenVPN/easy-rsa/releases/download/v3.0.6/EasyRSA-unix-v3.0.6.tgz \ + | tar xzv --strip-components=1 + +./easyrsa init-pki \ + && ./easyrsa build-ca nopass + +NUM_PD_NODES=3 +for i in $(seq 1 $NUM_PD_NODES); do + ./easyrsa gen-req pd$i nopass + ./easyrsa sign-req server pd$i +done + +NUM_TIKV_NODES=3 +for i in $(seq 1 $NUM_TIKV_NODES); do + ./easyrsa gen-req tikv$i nopass + ./easyrsa sign-req server tikv$i +done + +./easyrsa gen-req client nopass +./easyrsa sign-req server client +``` + +When running this script, you need to answer some questions and make some confirmations interactively. For the CA common name, you can use any desired name. While for the PD and TiKV nodes, you need to use the hostnames. + +If you see the following output, it means that the script runs successfully: + +```bash +$ ls easyrsa/pki/{ca.crt,issued,private} +easyrsa/pki/ca.crt + +easyrsa/pki/issued: +client.crt pd1.crt pd2.crt pd3.crt tikv1.crt tikv2.crt tikv3.crt + +easyrsa/pki/private: +ca.key client.key pd1.key pd2.key pd3.key tikv1.key tikv2.key tikv3.key +``` + +## Step 1. Configure the TiKV server certificates + +You need to set the certificates in the TiKV configuration file: + +```toml +# Using empty strings here means disabling secure connections. +[security] +# The path to the file that contains the PEM encoding of the server’s CA certificates. +ca-path = "/path/to/ca.pem" +# The path to the file that contains the PEM encoding of the server’s certificate chain. +cert-path = "/path/to/tikv-server-cert.pem" +# The path to the file that contains the PEM encoding of the server’s private key. +key-path = "/path/to/tikv-server-key.pem" +# The name list used to verify the common name in client’s certificates. Verification is +# not enabled if this field is empty. +cert-allowed-cn = ["tikv-server", "pd-server"] +``` + +Besides, the **connection URL should be changed to `https://`** instead of a plain `ip:port`. + +For the information about all TLS configuration parameters of TiKV, see [TiKV security-related parameters](../tikv-configuration-file/#security). + +## Step 2. Configure the PD certificates + +You need to set the certificates in the PD configuration file: + +```toml +[security] +# The path to the file that contains the PEM encoding of the server’s CA certificates. +cacert-path = "/path/to/ca.pem" +# The path to the file that contains the PEM encoding of the server’s certificate chain. +cert-path = "/path/to/pd-server-cert.pem" +# The path to the file that contains the PEM encoding of the server’s private key. +key-path = "/path/to/pd-server-key.pem" +# The name list used to verify the common name in client’s certificates. Verification is +# not enabled if this field is empty. +cert-allowed-cn = ["tikv-server", "pd-server"] +``` + +Besides, the **connection URL should be changed to `https://`** instead of a plain `ip:port`. + +For the information about all TLS configuration parameters of PD, see [PD security-related parameters](../pd-configuration-file/#security). + +## Step 3. Configure the TiKV client + +You need to set TLS options for the TiKV client to connect to TiKV. Taking [Rust Client](https://github.com/tikv/client-rust) as an example, the TLS options are set as follows: + +```rust +let config = Config::new(/* ... */).with_security( + // The path to the file that contains the PEM encoding of the server’s CA certificates. 
+    "/path/to/ca.pem",
+    // The path to the file that contains the PEM encoding of the client’s certificate chain.
+    "/path/to/client-cert.pem",
+    // The path to the file that contains the PEM encoding of the client’s private key.
+    "/path/to/client-key.pem"
+);
+```
+
+Besides, the **connection URL should be changed to `https://`** instead of a plain `ip:port`.
+
+{{< warning >}}
+Currently, TiKV Java Client does not support TLS.
+{{< /warning >}}
+
+## Step 4. Connect TiKV using `tikv-ctl` and `pd-ctl`
+
+To use `pd-ctl` and `tikv-ctl`, set the relevant options as follows:
+
+```bash
+# --cacert: the PEM-encoded CA certificates.
+# --cert: the PEM-encoded client certificate chain.
+# --key: the PEM-encoded client private key.
+pd-ctl \
+    --pd "https://127.0.0.1:2379" \
+    --cacert "/path/to/ca.pem" \
+    --cert "/path/to/client.pem" \
+    --key "/path/to/client-key.pem"
+
+# --ca-path: the PEM-encoded CA certificates.
+# --cert-path: the PEM-encoded client certificate chain.
+# --key-path: the PEM-encoded client private key.
+tikv-ctl \
+    --host "127.0.0.1:20160" \
+    --ca-path "/path/to/ca.pem" \
+    --cert-path "/path/to/client.pem" \
+    --key-path "/path/to/client-key.pem"
+```
diff --git a/content/docs/7.1/deploy/configure/storage.md b/content/docs/7.1/deploy/configure/storage.md
new file mode 100644
index 00000000..72acd614
--- /dev/null
+++ b/content/docs/7.1/deploy/configure/storage.md
@@ -0,0 +1,12 @@
+---
+title: Storage Config
+description: Learn how to configure storage in TiKV.
+menu:
+  "7.1":
+    parent: Configure TiKV-7.1
+    weight: 12
+    identifier: Storage Config-7.1
+---
+
+
+You can find all the configuration parameters related to Storage [here](../tikv-configuration-file/#storage).
diff --git a/content/docs/7.1/deploy/configure/tikv-command-line.md b/content/docs/7.1/deploy/configure/tikv-command-line.md
new file mode 100644
index 00000000..d9d0c467
--- /dev/null
+++ b/content/docs/7.1/deploy/configure/tikv-command-line.md
@@ -0,0 +1,76 @@
+---
+title: TiKV Command Line Parameters
+description: Learn some configuration flags of TiKV
+menu:
+  "7.1":
+    parent: Configure TiKV-7.1
+    weight: 3
+    identifier: TiKV Command Line Parameters-7.1
+---
+
+TiKV supports readable units in command line parameters.
+
+- File size (bytes by default, case-insensitive): KB, MB, GB, TB, PB
+- Time (ms by default): ms, s, m, h
+
+## `-A, --addr`
+
+- The address that the TiKV server listens on
+- Default: "127.0.0.1:20160"
+- To deploy a cluster, you must use `--addr` to specify the IP address of the current host, such as "192.168.100.113:20160". If the cluster is running on Docker, specify the IP address of Docker as "0.0.0.0:20160".
+
+## `--advertise-addr`
+
+- The server advertise address for client traffic from outside
+- Default: ${addr}
+- If the client cannot connect to TiKV through the `--addr` address because of Docker or NAT network, you must manually set the `--advertise-addr` address.
+- For example, the internal IP address of Docker is "172.17.0.1", while the IP address of the host is "192.168.100.113" and the port mapping is set to "-p 20160:20160". In this case, you can set `--advertise-addr` to "192.168.100.113:20160". The client can find this service through "192.168.100.113:20160".
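+
+As a minimal sketch, the flags above might be combined as follows when TiKV sits behind the Docker port mapping described in this section (the PD address and data directory are illustrative assumptions):
+
+```bash
+tikv-server --addr="0.0.0.0:20160" \
+    --advertise-addr="192.168.100.113:20160" \
+    --data-dir="/tmp/tikv/store" \
+    --pd="192.168.100.113:2379"
+```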
+ +## `--status-addr` + ++ The port through which the TiKV service status is listened ++ Default: "20180" ++ The Prometheus can access this status information via "http://host:status_port/metrics". ++ The Profile can access this status information via "http://host:status_port/debug/pprof/profile". + +## `--advertise-status-addr` + +- The address through which TiKV accesses service status from outside. +- Default: The value of `--status-addr` is used. +- If the client cannot connect to TiKV through the `--status-addr` address because of Docker or NAT network, you must manually set the `--advertise-status-addr` address. +- For example, the internal IP address of Docker is "172.17.0.1", while the IP address of the host is "192.168.100.113" and the port mapping is set to "-p 20180:20180". In this case, you can set `--advertise-status-addr="192.168.100.113:20180"`. The client can find this service through "192.168.100.113:20180". + +## `-C, --config` + +- The config file +- Default: "" +- If you set the configuration using the command line, the same setting in the config file is overwritten. + +## `--capacity` + +- The store capacity +- Default: 0 (unlimited) +- PD uses this parameter to determine how to balance TiKV servers. (Tip: you can use 10GB instead of 1073741824) + +## `--data-dir` + +- The path to the data directory +- Default: "/tmp/tikv/store" + +## `-L` + +- The log level +- Default: "info" +- You can choose from trace, debug, info, warn, error, or off. + +## `--log-file` + +- The log file +- Default: "" +- If this parameter is not set, logs are written to stderr. Otherwise, logs are stored in the log file which will be automatically rotated every day. + +## `--pd` + +- The address list of PD servers +- Default: "" +- To make TiKV work, you must use the value of `--pd` to connect the TiKV server to the PD server. Separate multiple PD addresses using comma, for example "192.168.100.113:2379, 192.168.100.114:2379, 192.168.100.115:2379". diff --git a/content/docs/7.1/deploy/configure/tikv-configuration-file.md b/content/docs/7.1/deploy/configure/tikv-configuration-file.md new file mode 100644 index 00000000..02cd22a6 --- /dev/null +++ b/content/docs/7.1/deploy/configure/tikv-configuration-file.md @@ -0,0 +1,1326 @@ +--- +title: TiKV Config +description: Learn the TiKV configuration file +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 4 + identifier: TiKV Config-7.1 +--- + +The TiKV configuration file supports more options than command-line parameters. You can find the default configuration file in [etc/config-template.toml](https://github.com/tikv/tikv/blob/release-5.0/etc/config-template.toml) and rename it to `config.toml`. + +This document only describes the parameters that are not included in command-line parameters. For more details, see [command-line parameter](../tikv-command-line). + +## server + +Configuration parameters related to the server. 
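+
+For instance, a short sketch of how this section might appear in `config.toml` (the values shown are simply the documented defaults, used here only for illustration):
+
+```toml
+[server]
+status-thread-pool-size = 1
+grpc-compression-type = "none"
+grpc-concurrency = 5
+```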
+ +### `status-thread-pool-size` + ++ The number of worker threads for the `HTTP` API service ++ Default value: `1` ++ Minimum value: `1` + +### `grpc-compression-type` + ++ The compression algorithm for gRPC messages ++ Optional values: `"none"`, `"deflate"`, `"gzip"` ++ Default value: `"none"` + +### `grpc-concurrency` + ++ The number of gRPC worker threads ++ Default value: `5` ++ Minimum value: `1` + +### `grpc-concurrent-stream` + ++ The maximum number of concurrent requests allowed in a gRPC stream ++ Default value: `1024` ++ Minimum value: `1` + +### `grpc-memory-pool-quota` + ++ Limit the memory size that can be used by gRPC ++ Default: No limit ++ Limit the memory in case OOM is observed. Note that limit the usage can lead to potential stall + +### `grpc-raft-conn-num` + ++ The maximum number of links among TiKV nodes for Raft communication ++ Default: `1` ++ Minimum value: `1` + +### `grpc-stream-initial-window-size` + ++ The window size of the gRPC stream ++ Default: 2MB ++ Unit: KB|MB|GB ++ Minimum value: `"1KB"` + +### `grpc-keepalive-time` + ++ The time interval at which that gRPC sends `keepalive` Ping messages ++ Default: `"10s"` ++ Minimum value: `"1s"` + +### `grpc-keepalive-timeout` + ++ Disables the timeout for gRPC streams ++ Default: `"3s"` ++ Minimum value: `"1s"` + +### `concurrent-send-snap-limit` + ++ The maximum number of snapshots sent at the same time ++ Default value: `32` ++ Minimum value: `1` + +### `concurrent-recv-snap-limit` + ++ The maximum number of snapshots received at the same time ++ Default value: `32` ++ Minimum value: `1` + +### `end-point-recursion-limit` + ++ The maximum number of recursive levels allowed when TiKV decodes the Coprocessor DAG expression ++ Default value: `1000` ++ Minimum value: `1` + +### `end-point-request-max-handle-duration` + ++ The longest duration allowed for a client's push down request to TiKV for processing tasks ++ Default value: `"60s"` ++ Minimum value: `"1s"` + +### `snap-max-write-bytes-per-sec` + ++ The maximum allowable disk bandwidth when processing snapshots ++ Default value: `"100MB"` ++ Unit: KB|MB|GB ++ Minimum value: `"1KB"` + +### `end-point-slow-log-threshold` + ++ The time threshold for a client's push down request to print slow log ++ Default value: `"1s"` ++ Minimum value: `0` + +## readpool.unified + +Configuration parameters related to the single thread pool serving read requests. This thread pool supersedes the original storage thread pool and coprocessor thread pool since the 4.0 version. + +### `min-thread-count` + ++ The minimal working thread count of the unified read pool ++ Default value: `1` + +### `max-thread-count` + ++ The maximum working thread count of the unified read pool ++ Default value: `MAX(4, CPU * 0.8)` + +### `stack-size` + ++ The stack size of the threads in the unified thread pool ++ Type: Integer + Unit ++ Default value: `"10MB"` ++ Unit: KB|MB|GB ++ Minimum value: `"2MB"` ++ Maximum value: The number of Kbytes output in the result of the `ulimit -sH` command executed in the system. + +### `max-tasks-per-worker` + ++ The maximum number of tasks allowed for a single thread in the unified read pool. `Server Is Busy` is returned when the value is exceeded. ++ Default value: `2000` ++ Minimum value: `2` + +## readpool.storage + +Configuration parameters related to storage thread pool. + +### `use-unified-pool` + ++ Determines whether to use the unified thread pool (configured in [`readpool.unified`](#readpoolunified)) for storage requests. 
If the value of this parameter is `false`, a separate thread pool is used, which is configured through the rest parameters in this section (`readpool.storage`). ++ Default value: If this section (`readpool.storage`) has no other configurations, the default value is `true`. Otherwise, for the backward compatibility, the default value is `false`. Change the configuration in [`readpool.unified`](#readpoolunified) as needed before enabling this option. + +### `high-concurrency` + ++ The allowable number of concurrent threads that handle high-priority `read` requests ++ When `8` ≤ `cpu num` ≤ `16`, the default value is `cpu_num * 0.5`; when `cpu num` is greater than `8`, the default value is `4`; when `cpu num` is greater than `16`, the default value is `8`. ++ Minimum value: `1` + +### `normal-concurrency` + ++ The allowable number of concurrent threads that handle normal-priority `read` requests ++ When `8` ≤ `cpu num` ≤ `16`, the default value is `cpu_num * 0.5`; when `cpu num` is greater than `8`, the default value is `4`; when `cpu num` is greater than `16`, the default value is `8`. ++ Minimum value: `1` + +### `low-concurrency` + ++ The allowable number of concurrent threads that handle low-priority `read` requests ++ When `8` ≤ `cpu num` ≤ `16`, the default value is `cpu_num * 0.5`; when `cpu num` is greater than `8`, the default value is `4`; when `cpu num` is greater than `16`, the default value is `8`. ++ Minimum value: `1` + +### `max-tasks-per-worker-high` + ++ The maximum number of tasks allowed for a single thread in a high-priority thread pool. `Server Is Busy` is returned when the value is exceeded. ++ Default value: `2000` ++ Minimum value: `2` + +### `max-tasks-per-worker-normal` + ++ The maximum number of tasks allowed for a single thread in a normal-priority thread pool. `Server Is Busy` is returned when the value is exceeded. ++ Default value: `2000` ++ Minimum value: `2` + +### `max-tasks-per-worker-low` + ++ The maximum number of tasks allowed for a single thread in a low-priority thread pool. `Server Is Busy` is returned when the value is exceeded. ++ Default value: `2000` ++ Minimum value: `2` + +### `stack-size` + ++ The stack size of threads in the Storage read thread pool ++ Type: Integer + Unit ++ Default value: `"10MB"` ++ Unit: KB|MB|GB ++ Minimum value: `"2MB"` ++ Maximum value: The number of Kbytes output in the result of the `ulimit -sH` command executed in the system. + +## readpool.coprocessor + +Configuration parameters related to the Coprocessor thread pool. + +### `use-unified-pool` + ++ Determines whether to use the unified thread pool (configured in [`readpool.unified`](#readpoolunified)) for coprocessor requests. If the value of this parameter is `false`, a separate thread pool is used, which is configured through the rest parameters in this section (`readpool.coprocessor`). ++ Default value: If none of the parameters in this section (`readpool.coprocessor`) are set, the default value is `true`. Otherwise, the default value is `false` for the backward compatibility. Adjust the configuration parameter in [`readpool.unified`](#readpoolunified) before enabling this parameter. 
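+
+To make the relationship between these switches and the unified read pool concrete, here is a minimal sketch (the thread counts are illustrative, not tuning advice):
+
+```toml
+[readpool.unified]
+min-thread-count = 1
+max-thread-count = 8
+
+[readpool.storage]
+use-unified-pool = true
+
+[readpool.coprocessor]
+use-unified-pool = true
+```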
+ +### `high-concurrency` + ++ The allowable number of concurrent threads that handle high-priority Coprocessor requests, such as checkpoints ++ Default value: `CPU * 0.8` ++ Minimum value: `1` + +### `normal-concurrency` + ++ The allowable number of concurrent threads that handle normal-priority Coprocessor requests ++ Default value: `CPU * 0.8` ++ Minimum value: `1` + +### `low-concurrency` + ++ The allowable number of concurrent threads that handle low-priority Coprocessor requests, such as table scan ++ Default value: `CPU * 0.8` ++ Minimum value: `1` + +### `max-tasks-per-worker-high` + ++ The number of tasks allowed for a single thread in a high-priority thread pool. When this number is exceeded, `Server Is Busy` is returned. ++ Default value: `2000` ++ Minimum value: `2` + +### `max-tasks-per-worker-normal` + ++ The number of tasks allowed for a single thread in a normal-priority thread pool. When this number is exceeded, `Server Is Busy` is returned. ++ Default value: `2000` ++ Minimum value: `2` + +### `max-tasks-per-worker-low` + ++ The number of tasks allowed for a single thread in a low-priority thread pool. When this number is exceeded, `Server Is Busy` is returned. ++ Default value: `2000` ++ Minimum value: `2` + +### `stack-size` + ++ The stack size of the thread in the Coprocessor thread pool ++ Type: Integer + Unit ++ Default value: `"10MB"` ++ Unit: KB|MB|GB ++ Minimum value: `"2MB"` ++ Maximum value: The number of Kbytes output in the result of the `ulimit -sH` command executed in the system. + +## storage + +Configuration parameters related to storage. + +### `scheduler-concurrency` + ++ A built-in memory lock mechanism to prevent simultaneous operations on a key. Each key has a hash in a different slot. ++ Default value: `524288` ++ Minimum value: `1` + +### `scheduler-worker-pool-size` + ++ The number of `scheduler` threads, mainly used for checking transaction consistency before data writing. If the number of CPU cores is greater than or equal to `16`, the default value is `8`; otherwise, the default value is `4`. ++ Default value: `4` ++ Minimum value: `1` + +### `scheduler-pending-write-threshold` + ++ The maximum size of the write queue. A `Server Is Busy` error is returned for a new write to TiKV when this value is exceeded. ++ Default value: `"100MB"` ++ Unit: MB|GB + +### `reserve-space` + ++ The size of the temporary file that preoccupies the extra space when TiKV is started. The name of temporary file is `space_placeholder_file`, located in the `storage.data-dir` directory. When TiKV runs out of disk space and cannot be started normally, you can delete this file as an emergency intervention and set `reserve-space` to `"0MB"`. ++ Default value: `"5GB"` ++ Unite: MB|GB + +### `enable-ttl` + ++ TTL is short for "Time to live". If this parameter is enabled, TiKV automatically deletes data that reaches its TTL. To set the value of TTL, you need to specify it in the requests when writing data via the client. If the TTL is not specified, it means that TiKV does not automatically delete the corresponding data. ++ Note: The TTL feature is only available for the RawKV interface for now. You can only configure this feature when creating a new cluster because TTL uses different data formats in the storage layer. If you modify this parameter on an existing cluster, TiKV reports errors when it starts. ++ Default value: `false` + +### `ttl-check-poll-interval` + ++ The interval of checking data to reclaim physical spaces. 
If data reaches its TTL, TiKV forcibly reclaims its physical space during the check. ++ Default value: `"12h"` ++ Minimum value: `"0s"` + +## storage.block-cache + +Configuration parameters related to the sharing of block cache among multiple RocksDB Column Families (CF). When these configuration parameters are enabled, block cache separately configured for each column family is disabled. + +### `shared` + ++ Enables or disables the sharing of block cache. ++ Default value: `true` + +### `capacity` + ++ The size of the shared block cache. ++ Default value: 45% of the size of total system memory ++ Unit: KB|MB|GB + +## storage.io-rate-limit + +Configuration parameters related to I/O rate limiter. + +### `max-bytes-per-sec` + ++ Limits the maximum I/O bytes that a server can write to or read from the disk (determined by the `mode` configuration parameter below) in one second. When this limit is reached, TiKV prefers throttling background operations over foreground ones. The value of this configuration parameter should be set to the disk's optimal I/O bandwidth, for example, the maximum I/O bandwidth specified by your cloud disk vendor. When this configuration value is set to zero, disk I/O operations are not limited. ++ Default value: `"0MB"` + +### `mode` + ++ Determines which types of I/O operations are counted and restrained below the `max-bytes-per-sec` threshold. Currently, only the write-only mode is supported. ++ Optional value: `"write-only"` ++ Default value: `"write-only"` + +## raftstore + +Configuration parameters related to raftstore. + +### `prevote` + ++ Enables or disables `prevote`. Enabling this feature helps reduce jitter on the system after recovery from network partition. ++ Default value: `true` + +### `raftdb-path` + ++ The path to the Raft library, which is `storage.data-dir/raft` by default ++ Default value: "" + +### `raft-base-tick-interval` + ++ The time interval at which the Raft state machine ticks ++ Default value: `"1s"` ++ Minimum value: greater than `0` + +### `raft-heartbeat-ticks` + ++ The number of passed ticks when the heartbeat is sent. This means that a heartbeat is sent at the time interval of `raft-base-tick-interval` * `raft-heartbeat-ticks`. ++ Default value: `2` ++ Minimum value: greater than `0` + +### `raft-election-timeout-ticks` + ++ The number of passed ticks when Raft election is initiated. This means that if Raft group is missing the leader, a leader election is initiated approximately after the time interval of `raft-base-tick-interval` * `raft-election-timeout-ticks`. ++ Default value: `10` ++ Minimum value: `raft-heartbeat-ticks` + +### `raft-min-election-timeout-ticks` + ++ The minimum number of ticks during which the Raft election is initiated. If the number is `0`, the value of `raft-election-timeout-ticks` is used. The value of this parameter must be greater than or equal to `raft-election-timeout-ticks`. ++ Default value: `0` ++ Minimum value: `0` + +### `raft-max-election-timeout-ticks` + ++ The maximum number of ticks during which the Raft election is initiated. If the number is `0`, the value of `raft-election-timeout-ticks` * `2` is used. ++ Default value: `0` ++ Minimum value: `0` + +### `raft-max-size-per-msg` + ++ The soft limit on the size of a single message packet ++ Default value: `"1MB"` ++ Minimum value: `0` ++ Unit: MB + +### `raft-max-inflight-msgs` + ++ The number of Raft logs to be confirmed. If this number is exceeded, log sending slows down. 
++ Default value: `256` ++ Minimum value: greater than `0` + +### `raft-entry-max-size` + ++ The hard limit on the maximum size of a single log ++ Default value: `"8MB"` ++ Minimum value: `0` ++ Unit: MB|GB + +### `raft-log-gc-tick-interval` + ++ The time interval at which the polling task of deleting Raft logs is scheduled. `0` means that this feature is disabled. ++ Default value: `"10s"` ++ Minimum value: `0` + +### `raft-log-gc-threshold` + ++ The soft limit on the maximum allowable count of residual Raft logs ++ Default value: `50` ++ Minimum value: `1` + +### `raft-log-gc-count-limit` + ++ The hard limit on the allowable number of residual Raft logs ++ Default value: the log number that can be accommodated in the 3/4 Region size (calculated as 1MB for each log) ++ Minimum value: `0` + +### `raft-log-gc-size-limit` + ++ The hard limit on the allowable size of residual Raft logs ++ Default value: 3/4 of the Region size ++ Minimum value: greater than `0` + +### `raft-entry-cache-life-time` + ++ The maximum remaining time allowed for the log cache in memory. ++ Default value: `"30s"` ++ Minimum value: `0` + +### `raft-reject-transfer-leader-duration` + ++ The protection time for new nodes, which is used to control the shortest interval to migrate a leader to the newly added node. Setting this value too small might cause the failure of leader transfer. ++ Default value: `"3s"` ++ Minimum value: `0` + +### `hibernate-regions` (**Experimental**) + ++ Enables or disables Hibernate Region. When this option is enabled, a Region idle for a long time is automatically set as hibernated. This reduces the extra overhead caused by heartbeat messages between the Raft leader and the followers for idle Regions. You can use `raftstore.peer-stale-state-check-interval` to modify the heartbeat interval between the leader and the followers of hibernated Regions. ++ Default value: true + +### `raftstore.peer-stale-state-check-interval` + ++ Modifies the state check interval for Regions. ++ Default value: 5 min + +### `split-region-check-tick-interval` + ++ Specifies the interval at which to check whether the Region split is needed. `0` means that this feature is disabled. ++ Default value: `"10s"` ++ Minimum value: `0` + +### `region-split-check-diff` + ++ The maximum value by which the Region data is allowed to exceed before Region split ++ Default value: 1/16 of the Region size. ++ Minimum value: `0` + +### `region-compact-check-interval` + ++ The time interval at which to check whether it is necessary to manually trigger RocksDB compaction. `0` means that this feature is disabled. ++ Default value: `"5m"` ++ Minimum value: `0` + +### `region-compact-check-step` + ++ The number of Regions checked at one time for each round of manual compaction ++ Default value: `100` ++ Minimum value: `0` + +### `region-compact-min-tombstones` + ++ The number of tombstones required to trigger RocksDB compaction ++ Default value: `10000` ++ Minimum value: `0` + +### `region-compact-tombstones-percent` + ++ The proportion of tombstone required to trigger RocksDB compaction ++ Default value: `30` ++ Minimum value: `1` ++ Maximum value: `100` + +### `pd-heartbeat-tick-interval` + ++ The time interval at which a Region's heartbeat to PD is triggered. `0` means that this feature is disabled. ++ Default value: `"1m"` ++ Minimum value: `0` + +### `pd-store-heartbeat-tick-interval` + ++ The time interval at which a store's heartbeat to PD is triggered. `0` means that this feature is disabled. 
++ Default value: `"10s"` ++ Minimum value: `0` + +### `snap-mgr-gc-tick-interval` + ++ The time interval at which the recycle of expired snapshot files is triggered. `0` means that this feature is disabled. ++ Default value: `"1m"` ++ Minimum value: `0` + +### `snap-gc-timeout` + ++ The longest time for which a snapshot file is saved ++ Default value: `"4h"` ++ Minimum value: `0` + +### `lock-cf-compact-interval` + ++ The time interval at which TiKV triggers a manual compaction for the Lock Column Family ++ Default value: `"256MB"` ++ Default value: `"10m"` ++ Minimum value: `0` + +### `lock-cf-compact-bytes-threshold` + ++ The size out of which TiKV triggers a manual compaction for the Lock Column Family ++ Default value: `"256MB"` ++ Minimum value: `0` ++ Unit: MB + +### `notify-capacity` + ++ The longest length of the Region message queue. ++ Default value: `40960` ++ Minimum value: `0` + +### `messages-per-tick` + ++ The maximum number of messages processed per batch ++ Default value: `4096` ++ Minimum value: `0` + +### `max-peer-down-duration` + ++ The longest inactive duration allowed for a peer. A peer with timeout is marked as `down`, and PD tries to delete it later. ++ Default value: `"5m"` ++ Minimum value: `0` + +### `max-leader-missing-duration` + ++ The longest duration allowed for a peer to be in the state where a Raft group is missing the leader. If this value is exceeded, the peer verifies with PD whether the peer has been deleted. ++ Default value: `"2h"` ++ Minimum value: greater than `abnormal-leader-missing-duration` + +### `abnormal-leader-missing-duration` + ++ The longest duration allowed for a peer to be in the state where a Raft group is missing the leader. If this value is exceeded, the peer is seen as abnormal and marked in metrics and logs. ++ Default value: `"10m"` ++ Minimum value: greater than `peer-stale-state-check-interval` + +### `peer-stale-state-check-interval` + ++ The time interval to trigger the check for whether a peer is in the state where a Raft group is missing the leader. ++ Default value: `"5m"` ++ Minimum value: greater than `2 * election-timeout` + +### `leader-transfer-max-log-lag` + ++ The maximum number of missing logs allowed for the transferee during a Raft leader transfer ++ Default value: `128` ++ Minimum value: `10` + +### `snap-apply-batch-size` + ++ The memory cache size required when the imported snapshot file is written into the disk ++ Default value: `"10MB"` ++ Minimum value: `0` ++ Unit: MB + +### `consistency-check-interval` + ++ The time interval at which the consistency check is triggered. `0` means that this feature is disabled. ++ Default value: `"0s"` ++ Minimum value: `0` + +### `raft-store-max-leader-lease` + ++ The longest trusted period of a Raft leader ++ Default value: `"9s"` ++ Minimum value: `0` + +### `allow-remove-leader` + ++ Determines whether to allow deleting the main switch ++ Default value: `false` + +### `merge-max-log-gap` + ++ The maximum number of missing logs allowed when `merge` is performed ++ Default value: `10` ++ Minimum value: greater than `raft-log-gc-count-limit` + +### `merge-check-tick-interval` + ++ The time interval at which TiKV checks whether a Region needs merge ++ Default value: `"2s"` ++ Minimum value: greater than `0` + +### `use-delete-range` + ++ Determines whether to delete data from the `rocksdb delete_range` interface ++ Default value: `false` + +### `cleanup-import-sst-interval` + ++ The time interval at which the expired SST file is checked. 
`0` means that this feature is disabled. ++ Default value: `"10m"` ++ Minimum value: `0` + +### `local-read-batch-size` + ++ The maximum number of read requests processed in one batch ++ Default value: `1024` ++ Minimum value: greater than `0` + +### `apply-max-batch-size` + ++ The maximum number of requests for data flushing in one batch ++ Default value: `256` ++ Minimum value: greater than `0` + +### `apply-pool-size` + ++ The allowable number of threads in the pool that flushes data to storage ++ Default value: `2` ++ Minimum value: greater than `0` + +### `store-max-batch-size` + ++ The maximum number of requests processed in one batch ++ If `hibernate-regions` is enabled, the default value is `256`. If `hibernate-regions` is disabled, the default value is `1024`. ++ Minimum value: greater than `0` + +### `store-pool-size` + ++ The allowable number of threads that process Raft ++ Default value: `2` ++ Minimum value: greater than `0` + +### `future-poll-size` + ++ The allowable number of threads that drive `future` ++ Default value: `1` ++ Minimum value: greater than `0` + +## Coprocessor + +Configuration parameters related to Coprocessor. + +### `split-region-on-table` + ++ Determines whether to split Region by table. It is recommended for you to use the feature only with TiDB. ++ Default value: `false` + +### `batch-split-limit` + ++ The threshold of Region split in batches. Increasing this value speeds up Region split. ++ Default value: `10` ++ Minimum value: `1` + +### `region-max-size` + ++ The maximum size of a Region. When the value is exceeded, the Region splits into many. ++ Default value: `"144MB"` ++ Unit: KB|MB|GB + +### `region-split-size` + ++ The size of the newly split Region. This value is an estimate. ++ Default value: `"96MB"` ++ Unit: KB|MB|GB + +### `region-max-keys` + ++ The maximum allowable number of keys in a Region. When this value is exceeded, the Region splits into many. ++ Default value: `1440000` + +### `region-split-keys` + ++ The number of keys in the newly split Region. This value is an estimate. ++ Default value: `960000` + +## RocksDB + +Configuration parameters related to RocksDB. + +### `max-background-jobs` + ++ The number of background threads in RocksDB ++ Default value: `8` ++ Minimum value: `2` + +### `max-background-flushes` + ++ The maximum number of concurrent background memtable flush jobs ++ Default value: `2` ++ Minimum value: `1` + +### `max-sub-compactions` + ++ The number of sub-compaction operations performed concurrently in RocksDB ++ Default value: `3` ++ Minimum value: `1` + +### `max-open-files` + ++ The total number of files that RocksDB can open ++ Default value: `40960` ++ Minimum value: `-1` + +### `max-manifest-file-size` + ++ The maximum size of a RocksDB Manifest file ++ Default value: `"128MB"` ++ Minimum value: `0` ++ Unit: B|KB|MB|GB + +### `create-if-missing` + ++ Determines whether to automatically create a DB switch ++ Default value: `true` + +### `wal-recovery-mode` + ++ WAL recovery mode ++ Optional values: `0` (`TolerateCorruptedTailRecords`), `1` (`AbsoluteConsistency`), `2` (`PointInTimeRecovery`), `3` (`SkipAnyCorruptedRecords`) ++ Default value: `2` ++ Minimum value: `0` ++ Maximum value: `3` + +### `wal-dir` + ++ The directory in which WAL files are stored ++ Default value: `"/tmp/tikv/store"` + +### `wal-ttl-seconds` + ++ The living time of the archived WAL files. When the value is exceeded, the system deletes these files. 
++ Default value: `0` ++ Minimum value: `0` ++ unit: second + +### `wal-size-limit` + ++ The size limit of the archived WAL files. When the value is exceeded, the system deletes these files. ++ Default value: `0` ++ Minimum value: `0` ++ Unit: B|KB|MB|GB + +### `enable-statistics` + ++ Determines whether to enable the statistics of RocksDB ++ Default value: `true` + +### `stats-dump-period` + ++ The interval at which statistics are output to the log. ++ Default value: `10m` + +### `compaction-readahead-size` + ++ The size of `readahead` when compaction is being performed ++ Default value: `0` ++ Minimum value: `0` ++ Unit: B|KB|MB|GB + +### `writable-file-max-buffer-size` + ++ The maximum buffer size used in WritableFileWrite ++ Default value: `"1MB"` ++ Minimum value: `0` ++ Unit: B|KB|MB|GB + +### `use-direct-io-for-flush-and-compaction` + ++ Determines whether to use `O_DIRECT` for both reads and writes in background flush and compactions ++ Default value: `false` + +### `rate-bytes-per-sec` + ++ The maximum rate permitted by RocksDB's compaction rate limiter ++ Default value: `10GB` ++ Minimum value: `0` ++ Unit: B|KB|MB|GB + +### `rate-limiter-mode` + ++ RocksDB's compaction rate limiter mode ++ Optional values: `1` (`ReadOnly`), `2` (`WriteOnly`), `3` (`AllIo`) ++ Default value: `2` ++ Minimum value: `1` ++ Maximum value: `3` + +### `rate-limiter-auto-tuned` + ++ Determines whether to automatically optimize the configuration of the RocksDB's compaction rate limiter based on recent workload. When this configuration is enabled, compaction pending bytes will be slightly higher than usual. ++ Default value: `true` + +### `enable-pipelined-write` + ++ Enables or disables Pipelined Write ++ Default value: `true` + +### `bytes-per-sync` + ++ The rate at which OS incrementally synchronizes files to disk while these files are being written asynchronously ++ Default value: `"1MB"` ++ Minimum value: `0` ++ Unit: B|KB|MB|GB + +### `wal-bytes-per-sync` + ++ The rate at which OS incrementally synchronizes WAL files to disk while the WAL files are being written ++ Default value: `"512KB"` ++ Minimum value: `0` ++ Unit: B|KB|MB|GB + +### `info-log-max-size` + ++ The maximum size of Info log ++ Default value: `"1GB"` ++ Minimum value: `0` ++ Unit: B|KB|MB|GB + +### `info-log-roll-time` + ++ The time interval at which Info logs are truncated. If the value is `0s`, logs are not truncated. ++ Default value: `"0s"` + +### `info-log-keep-log-file-num` + ++ The maximum number of kept log files ++ Default value: `10` ++ Minimum value: `0` + +### `info-log-dir` + ++ The directory in which logs are stored ++ Default value: "" + +## rocksdb.titan + +Configuration parameters related to Titan. + +### `enabled` + ++ Enables or disables Titan ++ Default value: `false` + +### `dirname` + ++ The directory in which the Titan Blob file is stored ++ Default value: `"titandb"` + +### `disable-gc` + ++ Determines whether to disable Garbage Collection (GC) that Titan performs to Blob files ++ Default value: `false` + +### `max-background-gc` + ++ The maximum number of GC threads in Titan ++ Default value: `4` ++ Minimum value: `1` + +## rocksdb.defaultcf + +Configuration parameters related to `rocksdb.defaultcf`. 
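+
+As with the other RocksDB sections, these parameters live under their own table in `config.toml`. A minimal sketch, using the documented defaults purely for illustration:
+
+```toml
+[rocksdb.defaultcf]
+block-size = "64KB"
+write-buffer-size = "128MB"
+compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
+```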
+ +### `block-size` + ++ The default size of a RocksDB block ++ Default value: `"64KB"` ++ Minimum value: `"1KB"` ++ Unit: KB|MB|GB + +### `block-cache-size` + ++ The cache size of a RocksDB block ++ Default value: `Total machine memory * 25%` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `disable-block-cache` + ++ Enables or disables block cache ++ Default value: `false` + +### `cache-index-and-filter-blocks` + ++ Enables or disables caching index and filter ++ Default value: `true` + +### `pin-l0-filter-and-index-blocks` + ++ Determines whether to pin the index and filter at L0 ++ Default value: `true` + +### `use-bloom-filter` + ++ Enables or disables bloom filter ++ Default value: `true` + +### `optimize-filters-for-hits` + ++ Determines whether to optimize the hit ratio of filters ++ Default value: `true` + +### `whole_key_filtering` + ++ Determines whether to put the entire key to bloom filter ++ Default value: `true` + +### `bloom-filter-bits-per-key` + ++ The length that bloom filter reserves for each key ++ Default value: `10` ++ unit: byte + +### `block-based-bloom-filter` + ++ Determines whether each block creates a bloom filter ++ Default value: `false` + +### `read-amp-bytes-per-bit` + ++ Enables or disables statistics of read amplification. ++ Optional values: `0` (disabled), > `0` (enabled). ++ Default value: `0` ++ Minimum value: `0` + +### `compression-per-level` + ++ The default compression algorithm for each level ++ Optional values: ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] ++ Default value: `No` for the first two levels, and `lz4` for the next five levels + +### `bottommost-level-compression` + ++ Sets the compression algorithm of the bottommost layer. This configuration parameter overrides the `compression-per-level` setting. ++ Ever since data is written to LSM-tree, RocksDB does not directly adopt the last compression algorithm specified in the `compression-per-level` array for the bottommost layer. `bottommost-level-compression` enables the bottommost layer to use the compression algorithm of the best compression effect from the beginning. ++ If you do not want to set the compression algorithm for the bottommost layer, set the value of this configuration parameter to `disable`. ++ Default value: "zstd" + +### `write-buffer-size` + ++ Memtable size ++ Default value: `"128MB"` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `max-write-buffer-number` + ++ The maximum number of memtables ++ Default value: `5` ++ Minimum value: `0` + +### `min-write-buffer-number-to-merge` + ++ The minimum number of memtables required to trigger flush ++ Default value: `1` ++ Minimum value: `0` + +### `max-bytes-for-level-base` + ++ The maximum number of bytes at base level (L1). Generally, it is set to 4 times the size of a memtable. ++ Default value: `"512MB"` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `target-file-size-base` + ++ The size of the target file at base level. This value is overridden by `compaction-guard-max-output-file-size` when the `enable-compaction-guard` value is `true`. 
++ Default: `"8MB"` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `level0-file-num-compaction-trigger` + ++ The maximum number of files at L0 that trigger compaction ++ Default value: `4` ++ Minimum value: `0` + +### `level0-slowdown-writes-trigger` + ++ The maximum number of files at L0 that trigger write stall ++ Default value: `20` ++ Minimum value: `0` + +### `level0-stop-writes-trigger` + ++ The maximum number of files at L0 required to completely block write ++ Default value: `36` ++ Minimum value: `0` + +### `max-compaction-bytes` + ++ The maximum number of bytes written into disk per compaction ++ Default value: `"2GB"` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `compaction-pri` + ++ The priority type of compaction ++ Optional values: `3` (`MinOverlappingRatio`), `0` (`ByCompensatedSize`), `1` (`OldestLargestSeqFirst`), `2` (`OldestSmallestSeqFirst`) ++ Default value: `3` + +### `dynamic-level-bytes` + ++ Determines whether to optimize dynamic level bytes ++ Default value: `true` + +### `num-levels` + ++ The maximum number of levels in a RocksDB file ++ Default value: `7` + +### `max-bytes-for-level-multiplier` + ++ The default amplification multiple for each layer ++ Default value: `10` + +### `rocksdb.defaultcf.compaction-style` + ++ Compaction method ++ Optional values: `"level"`, `"universal"` ++ Default value: `"level"` + +### `disable-auto-compactions` + ++ Enables or disables automatic compaction ++ Default value: `false` + +### `soft-pending-compaction-bytes-limit` + ++ The soft limit on the pending compaction bytes ++ Default value: `"192GB"` ++ Unit: KB|MB|GB + +### `hard-pending-compaction-bytes-limit` + ++ The hard limit on the pending compaction bytes ++ Default value: `"256GB"` ++ Unit: KB|MB|GB + +### `enable-compaction-guard` + ++ Enables or disables the compaction guard, which is an optimization to split SST files at TiKV Region boundaries. This optimization can help reduce compaction I/O and allows TiKV to use larger SST file size (thus less SST files overall) and at the time efficiently clean up stale data when migrating Regions. ++ Default value: `true` + +### `compaction-guard-min-output-file-size` + ++ The minimum SST file size when the compaction guard is enabled. This configuration prevents SST files from being too small when the compaction guard is enabled. ++ Default value: `"8MB"` ++ Unit: KB|MB|GB + +### `compaction-guard-max-output-file-size` + ++ The maximum SST file size when the compaction guard is enabled. The configuration prevents SST files from being too large when the compaction guard is enabled. This configuration overrides `target-file-size-base` for the same column family. ++ Default value: `"128MB"` ++ Unit: KB|MB|GB + +## rocksdb.defaultcf.titan + +Configuration parameters related to `rocksdb.defaultcf.titan`. + +### `min-blob-size` + ++ The smallest value stored in a Blob file. Values smaller than the specified size are stored in the LSM-Tree. 
++ Default value: `"1KB"` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `blob-file-compression` + ++ The compression algorithm used in a Blob file ++ Optional values: `"no"`, `"snappy"`, `"zlib"`, `"bzip2"`, `"lz4"`, `"lz4hc"`, `"zstd"` ++ Default value: `"lz4"` + +### `blob-cache-size` + ++ The cache size of a Blob file ++ Default value: `"0GB"` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `min-gc-batch-size` + ++ The minimum total size of Blob files required to perform GC for one time ++ Default value: `"16MB"` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `max-gc-batch-size` + ++ The maximum total size of Blob files allowed to perform GC for one time ++ Default value: `"64MB"` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `discardable-ratio` + ++ The ratio at which GC is triggered for Blob files. The Blob file can be selected for GC only if the proportion of the invalid values in a Blob file exceeds this ratio. ++ Default value: `0.5` ++ Minimum value: `0` ++ Maximum value: `1` + +### `sample-ratio` + ++ The ratio of (data read from a Blob file/the entire Blob file) when sampling the file during GC ++ Default value: `0.1` ++ Minimum value: `0` ++ Maximum value: `1` + +### `merge-small-file-threshold` + ++ When the size of a Blob file is smaller than this value, the Blob file might still be selected for GC. In this situation, `discardable-ratio` is ignored. ++ Default value: `"8MB"` ++ Minimum value: `0` ++ Unit: KB|MB|GB + +### `blob-run-mode` + ++ Specifies the running mode of Titan. ++ Optional values: + + `normal`: Writes data to the blob file when the value size exceeds `min-blob-size`. + + `read_only`: Refuses to write new data to the blob file, but still reads the original data from the blob file. + + `fallback`: Writes data in the blob file back to LSM. ++ Default value: `normal` + +### `level-merge` + ++ Determines whether to optimize the read performance. When `level-merge` is enabled, there is more write amplification. ++ Default value: `false` + +### `gc-merge-rewrite` + ++ Determines whether to use the merge operator to write back blob indexes for Titan GC. When `gc-merge-rewrite` is enabled, it reduces the effect of Titan GC on the writes in the foreground. ++ Default value: `false` + +## rocksdb.writecf + +Configuration parameters related to `rocksdb.writecf`. + +### `block-cache-size` + ++ Block cache size ++ Default value: `Total machine memory * 15%` ++ Unit: MB|GB + +### `optimize-filters-for-hits` + ++ Determines whether to optimize the hit ratio of the filter ++ Default value: `false` + +### `whole-key-filtering` + ++ Determines whether to put the entire key to bloom filter ++ Default value: `false` + +### `enable-compaction-guard` + ++ Enables or disables the compaction guard, which is an optimization to split SST files at TiKV Region boundaries. This optimization can help reduce compaction I/O and allows TiKV to use larger SST file size (thus less SST files overall) and at the time efficiently clean up stale data when migrating Regions. ++ Default value: `true` + +### `compaction-guard-min-output-file-size` + ++ The minimum SST file size when the compaction guard is enabled. This configuration prevents SST files from being too small when the compaction guard is enabled. ++ Default value: `"8MB"` ++ Unit: KB|MB|GB + +### `compaction-guard-max-output-file-size` + ++ The maximum SST file size when the compaction guard is enabled. The configuration prevents SST files from being too large when the compaction guard is enabled. 
This configuration overrides `target-file-size-base` for the same column family. ++ Default value: `"128MB"` ++ Unit: KB|MB|GB + +## rocksdb.lockcf + +Configuration parameters related to `rocksdb.lockcf`. + +### `block-cache-size` + ++ Block cache size ++ Default value: `Total machine memory * 2%` ++ Unit: MB|GB + +### `optimize-filters-for-hits` + ++ Determines whether to optimize the hit ratio of the filter ++ Default value: `false` + +### `level0-file-num-compaction-trigger` + ++ The number of files at L0 required to trigger compaction ++ Default value: `1` + +## raftdb + +Configuration parameters related to `raftdb`. + +### `max-background-jobs` + ++ The number of background threads in RocksDB ++ Default value: `4` ++ Minimum value: `2` + +### `max-sub-compactions` + ++ The number of concurrent sub-compaction operations performed in RocksDB ++ Default value: `2` ++ Minimum value: `1` + +### `wal-dir` + ++ The directory in which WAL files are stored ++ Default value: `"/tmp/tikv/store"` + +## security + +Configuration parameters related to security. + +### `ca-path` + ++ The path of the CA file ++ Default value: "" + +### `cert-path` + ++ The path of the Privacy Enhanced Mail (PEM) file that contains the X509 certificate ++ Default value: "" + +### `key-path` + ++ The path of the PEM file that contains the X509 key ++ Default value: "" + +### `redact-info-log` + ++ This configuration parameter enables or disables log redaction. If the configuration value is set to `true`, all user data in the log will be replaced by `?`. ++ Default value: `false` + +## security.encryption + +Configuration parameters related to [encryption at rest](https://docs.pingcap.com/tidb/stable/encryption-at-rest) (TDE). + +### `data-encryption-method` + ++ The encryption method for data files ++ Value options: "plaintext", "aes128-ctr", "aes192-ctr", and "aes256-ctr" ++ A value other than "plaintext" means that encryption is enabled, in which case the master key must be specified. ++ Default value: `"plaintext"` + +### `data-key-rotation-period` + ++ Specifies how often TiKV rotates the data encryption key. ++ Default value: `7d` + +### enable-file-dictionary-log + ++ Enables the optimization to reduce I/O and mutex contention when TiKV manages the encryption metadata. ++ To avoid possible compatibility issues when this configuration parameter is enabled (by default), see [Encryption at Rest - Compatibility between TiKV versions](https://docs.pingcap.com/tidb/stable/encryption-at-rest#compatibility-between-tikv-versions) for details. ++ Default value: `true` + +### master-key + ++ Specifies the master key if encryption is enabled. To learn how to configure a master key, see [Encryption at Rest - Configure encryption](https://docs.pingcap.com/tidb/stable/encryption-at-rest#configure-encryption). + +### previous-master-key + ++ Specifies the old master key when rotating the new master key. The configuration format is the same as that of `master-key`. To learn how to configure a master key, see [Encryption at Rest - Configure encryption](https://docs.pingcap.com/tidb/stable/encryption-at-rest#configure-encryption). + +## import + +Configuration parameters related to TiDB Lightning import and BR restore. + +### `num-threads` + ++ The number of threads to process RPC requests ++ Default value: `8` ++ Minimum value: `1` + +### `num-import-jobs` + ++ The number of jobs imported concurrently ++ Default value: `8` ++ Minimum value: `1` + +## gc + +The configuration parameter related to gc. 
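+ +A minimal sketch of how this section might look in the TiKV configuration file, assuming you choose to turn on the parameter described below (its documented default is `false`): + +```toml +[gc] +# Opt in to the GC in Compaction Filter feature. +enable-compaction-filter = true +```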
+ +### `enable-compaction-filter` + ++ Controls whether to enable the GC in Compaction Filter feature ++ Default value: `false` + +## backup + +Configuration parameters related to BR backup. + +### `num-threads` + ++ The number of worker threads to process backup ++ Default value: `MIN(CPU * 0.75, 32)`. ++ Minimum value: `1` + +## cdc + +Configuration parameters related to TiCDC. + +### `min-ts-interval` + ++ The interval at which Resolved TS is calculated and forwarded. ++ Default value: `"1s"` + +### `old-value-cache-size` + ++ The number of TiCDC old value entries cached in memory. ++ Default value: `1024` + +### `incremental-scan-speed-limit` + ++ The maximum speed at which historical data is incrementally scanned. ++ Default value: `"128MB"`, which means 128 MB per second. + +## pessimistic-txn + +### `wait-for-lock-timeout` + +- The longest time that a pessimistic transaction in TiKV waits for other transactions to release the lock. If the wait times out, an error is returned to the client, and the client retries to add a lock. The lock wait timeout is set by `innodb_lock_wait_timeout`. +- Default value: `"1s"` +- Minimum value: `"1ms"` + +### `wake-up-delay-duration` + +- When pessimistic transactions release the lock, among all the transactions waiting for the lock, only the transaction with the smallest `start_ts` is woken up. Other transactions are woken up after `wake-up-delay-duration`. +- Default value: `"20ms"` + +### `pipelined` + +- This configuration parameter enables the pipelined process of adding the pessimistic lock. With this feature enabled, after detecting that data can be locked, TiKV immediately notifies the client to execute the subsequent requests and writes the pessimistic lock asynchronously, which reduces most of the latency and significantly improves the performance of pessimistic transactions. However, there is still a low probability that the asynchronous write of the pessimistic lock fails, which might cause pessimistic transaction commits to fail. +- Default value: `true` diff --git a/content/docs/7.1/deploy/configure/titan.md b/content/docs/7.1/deploy/configure/titan.md new file mode 100644 index 00000000..734a24dd --- /dev/null +++ b/content/docs/7.1/deploy/configure/titan.md @@ -0,0 +1,59 @@ +--- +title: RocksDB Titan Config +description: Learn how to enable Titan in TiKV. +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 11 + identifier: RocksDB Titan Config-7.1 +--- + +Titan is a RocksDB plugin developed by PingCAP to separate keys and values in RocksDB storage. The goal of Titan is to reduce write amplification when storing large values. + +## How Titan works + +{{< figure + src="/img/docs/titan-architecture.png" + caption="Titan Architecture" + number="" >}} + +Titan stores values separately from the LSM-tree during flush and compaction. The value kept in the LSM-tree is a position index that points to the real value in a blob file. For more details on the design and implementation of Titan, see [Titan: A RocksDB Plugin to Reduce Write Amplification](https://pingcap.com/blog/titan-storage-engine-design-and-implementation/). + +{{< info >}} +**Notes:** Although Titan improves write performance, it increases data storage size and reduces range scan performance at the same time. Therefore, it is recommended to use Titan when the average size of values is larger than 1KB. +{{< /info >}} + +## How to enable Titan + +{{< warning >}} +Because Titan has not yet reached the maturity required for production use, it is disabled in TiKV by default.
Before enabling it, make sure you understand the above notes and have evaluated your scenario and needs. +{{< /warning >}} + +To enable Titan in TiKV, set the following in the TiKV configuration file: + +```toml +[rocksdb.titan] +# Enables or disables `Titan`. Note that Titan is still an experimental feature. +# default: false +enabled = true +``` + +For information about all the Titan configuration parameters, see [Titan-related parameters](../tikv-configuration-file/#rocksdbtitan). + +## How to fall back to RocksDB + +If you find that Titan does not help or causes read or other performance issues, you can take the following steps to fall back to RocksDB: + +1. Enter the fallback mode using `tikv-ctl`: + + ```bash + tikv-ctl --host 127.0.0.1:20160 modify-tikv-config -m kvdb -n default.blob_run_mode -v "kFallback" + ``` + + {{< info >}} +When using this command, make sure you have already enabled Titan. + {{< /info >}} + +2. Wait until the number of blob files drops to 0. You can also speed up this process with `tikv-ctl compact-cluster`. + +3. Set `rocksdb.titan.enabled=false` in the TiKV configuration file, and then restart TiKV. diff --git a/content/docs/7.1/deploy/configure/topology.md b/content/docs/7.1/deploy/configure/topology.md new file mode 100644 index 00000000..2ff5f85f --- /dev/null +++ b/content/docs/7.1/deploy/configure/topology.md @@ -0,0 +1,98 @@ +--- +title: Topology Label Config +description: Learn how to configure topology labels. +menu: + "7.1": + parent: Configure TiKV-7.1 + weight: 6 + identifier: Topology Label Config-7.1 +--- + +TiKV uses topology labels (hereafter referred to as the labels) to declare its location information, and the PD scheduler uses the labels to optimize TiKV's fault tolerance. This document describes how to configure the labels. + +## Declare the label hierarchy in PD + +The labels are hierarchical, for example, `zone > rack > host`. You can declare their hierarchies in the PD configuration file or `pd-ctl`: + +- PD configuration file: + ```toml + [replication] + max-replicas = 3 + location-labels = ["zone", "rack", "host"] + ``` +- pd-ctl: + + ```bash + pd-ctl >> config set location-labels zone,rack,host + ``` + {{< warning >}} +The number of machines must be no less than `max-replicas`. + {{< /warning >}} + +For information about all replication configuration parameters, see [Replication-related parameters](../pd-configuration-file/#replication). + +## Declare the labels for TiKV + +Assume that the topology has three layers: `zone > rack > host`. You can set a label for each layer by command line parameter or configuration file; TiKV then reports its labels to PD: + +- TiKV command line parameter: + + ```bash + tikv-server --labels zone=<zone>,rack=<rack>,host=<host> + ``` + +- TiKV configuration file: + + ```toml + [server] + labels = "zone=<zone>,rack=<rack>,host=<host>" + ``` + +## Example + +PD schedules replicas optimally according to the topology information. You only need to decide what kind of topology achieves the desired effect. + +If you use 3 replicas and hope that the TiKV cluster is always highly available even when a data zone goes down, you need at least 4 data zones. + +Assume that you have 4 data zones, each zone has 2 racks, and each rack has 2 hosts.
You can start 2 TiKV instances on each host as follows: + +Start TiKV: + +```bash +# zone=z1 +tikv-server --labels zone=z1,rack=r1,host=h1 +tikv-server --labels zone=z1,rack=r1,host=h2 +tikv-server --labels zone=z1,rack=r2,host=h1 +tikv-server --labels zone=z1,rack=r2,host=h2 + +# zone=z2 +tikv-server --labels zone=z2,rack=r1,host=h1 +tikv-server --labels zone=z2,rack=r1,host=h2 +tikv-server --labels zone=z2,rack=r2,host=h1 +tikv-server --labels zone=z2,rack=r2,host=h2 + +# zone=z3 +tikv-server --labels zone=z3,rack=r1,host=h1 +tikv-server --labels zone=z3,rack=r1,host=h2 +tikv-server --labels zone=z3,rack=r2,host=h1 +tikv-server --labels zone=z3,rack=r2,host=h2 + +# zone=z4 +tikv-server --labels zone=z4,rack=r1,host=h1 +tikv-server --labels zone=z4,rack=r1,host=h2 +tikv-server --labels zone=z4,rack=r2,host=h1 +tikv-server --labels zone=z4,rack=r2,host=h2 +``` + +Configure PD: + +```bash +# Use `pd-ctl` to connect to PD: +$ pd-ctl +>> config set location-labels zone,rack,host +``` + +Now, PD schedules replicas of the same `Region` to different data zones. + +- Even if one data zone goes down, the TiKV cluster is still highly available. +- If the data zone cannot recover within a period of time, PD removes the replica from this data zone. diff --git a/content/docs/7.1/deploy/deploy.md b/content/docs/7.1/deploy/deploy.md new file mode 100644 index 00000000..8032d93d --- /dev/null +++ b/content/docs/7.1/deploy/deploy.md @@ -0,0 +1,66 @@ +--- +title: Deploy +description: Learn how to deploy and operate a TiKV cluster +menu: + "7.1": + weight: 3 + identifier: Deploy-7.1 +--- + +Learn to deploy, configure, monitor, and scale TiKV as you adopt the service into your project and infrastructure. + +## [Install TiKV](../install/install/) + +In the [Install TiKV](../install/install/) section you’ll find several guides to help you deploy and integrate TiKV into your infrastructure. + +The best supported and most comprehensive deployment solution for production environments is to [Deploy TiKV using TiUP](../install/production/). + +If you’re determined to strike out on your own, we’ve done our best to provide you with the tools you need to build your own solution. Start with [Install binary manually](../install/test/#install-binary-manually). + +If you want to try TiKV on your own Mac or Linux machine, please try [TiUP Playground](../install/test/#tiup-playground). + +## [Configure TiKV](../configure/introduction/) + +Learn about how you can configure TiKV to meet your needs in the [configure](../configure/introduction/) section. There you’ll find a number of guides including: + +PD + +- [PD Command Line Parameters](../pd-command-line): Learn configuration flags of PD. +- [PD Config](../pd-configuration-file): Learn the PD configuration file. + +TiKV + +- [TiKV Command Line Parameters](../tikv-command-line): Learn configuration flags of TiKV. +- [TiKV Config](../tikv-configuration-file): Learn the TiKV configuration file. +- [Security](../security): Use TLS security and review security procedures. +- [Topology Label](../topology): Use location awareness to improve resiliency and performance. +- [Limit](../limit): Learn how to configure the scheduling rate limit on stores. +- [Region Merge](../region-merge): Tweak region merging. +- [RocksDB](../rocksdb): Tweak RocksDB configuration parameters. +- [Raftstore](../raftstore): Learn how to configure Raftstore in TiKV. +- [Titan](../titan): Enable Titan to improve performance with large values. +- [Storage](../storage): Learn how to configure storage in TiKV.
+- [gRPC](../grpc): Learn how to configure gRPC in TiKV. +- [Coprocessor](../coprocessor): Learn how to configure Coprocessor in TiKV. + + +## [Benchmark and Performance](../benchmark/benchmark/) + +## [Monitor and Alert](../monitor/monitor/) + +Learn how to inspect a TiKV cluster in the [Monitor and Alert](../monitor/monitor/) section. You’ll find out + +- [Monitoring Framework](../monitor/framework/): Use Prometheus and Grafana to build the TiKV monitoring framework. +- [Monitoring API](../monitor/api/): Learn the API of TiKV monitoring services. +- [Deploy Monitoring Services](../monitor/deploy/): Learn how to deploy monitoring services for the TiKV cluster. +- [Export Grafana Shapshots](../monitor/grafana/): Learn how to export snapshots of Grafana Dashboard, and how to visualize these files. +- [Key Metrics](../monitor/key-metrics/): Learn some key metrics displayed on the Grafana Overview dashboard. +- [TiKV Cluster Alert Rules](../monitor/alert/): Learn the alert rules in a TiKV cluster. + +## [Operate TiKV](../operate/operate/) + +This section introduces how to maintain and operate a TiKV cluster. + +- [Upgrade a TiKV cluster using TiUP](../operate/upgrade): Learn how to upgrade TiKV using TiUP +- [Scale out/in a TiKV cluster using TiUP](../operate/scale): How to grow and shrink your TiKV cluster. +- [Maintain a TiKV cluster using TiUP](../operate/maintain): Learn the common operations to operate and maintain a TiKV cluster using TiUP diff --git a/content/docs/7.1/deploy/install/install.md b/content/docs/7.1/deploy/install/install.md new file mode 100644 index 00000000..2929f6c5 --- /dev/null +++ b/content/docs/7.1/deploy/install/install.md @@ -0,0 +1,16 @@ +--- +title: Install TiKV +description: TiKV deployment prerequisites and methods +menu: + "7.1": + parent: Deploy-7.1 + weight: 1 + identifier: Install TiKV-7.1 +--- + +This section introduces how to deploy and verify a TiKV cluster in the production environment. + +- [Prerequisites](../prerequisites) +- [Production Deployment](../production) +- [Verify Cluster Status](../verify) +- [Test Deployment](../test) diff --git a/content/docs/7.1/deploy/install/prerequisites.md b/content/docs/7.1/deploy/install/prerequisites.md new file mode 100644 index 00000000..0f94df09 --- /dev/null +++ b/content/docs/7.1/deploy/install/prerequisites.md @@ -0,0 +1,101 @@ +--- +title: Prerequisites +description: Learn the software and hardware recommendations for deploying and running TiKV +menu: + "7.1": + parent: Install TiKV-7.1 + weight: 1 + identifier: Prerequisites-7.1 +--- + +TiKV can be deployed in the Intel architecture server, ARM architecture server, and major virtualization environments and runs well. TiKV supports most of the major hardware networks and Linux operating systems. + +## Linux OS version requirements + +| Linux OS Platform | Version | +|:------------------------:|:-------------------------:| +| Red Hat Enterprise Linux | 7.3 or later 7.x releases | +| CentOS | 7.3 or later 7.x releases | +| Oracle Enterprise Linux | 7.3 or later 7.x releases | +| Ubuntu LTS | 16.04 or later | + +Other Linux OS versions such as Debian Linux and Fedora Linux might work but are not officially supported. + + +## Software recommendations + +### Control machine + +| Software | Version | +|:-------- |:-------------- | +| sshpass | 1.06 or later | +| TiUP | 1.4.0 or later | + +{{< info >}} +It is required that you [deploy TiUP on the control machine](../production#step-1-install-tiup-on-the-control-machine) to operate and manage TiKV clusters. 
+{{< /info >}} + +### Target machines + +| Software | Version | +|:-------- |:--------------- | +| sshpass | 1.06 or later | +| numa | 2.0.12 or later | +| tar | any | + +## Server recommendations + +You can deploy and run TiKV on the 64-bit generic hardware server platform in the Intel x86-64 architecture or on the hardware server platform in the ARM architecture. The recommendations about server hardware configuration (ignoring the resources occupied by the operating system itself) for development, test, and production environments are as follows: + +### Development and test environments + +| Component | CPU | Memory | Local Storage | Network | Instance Number (Minimum Requirement) | +|:---------:|:-------:|:------:|:-------------:|:--------------------:|:-------------------------------------:| +| PD | 4 core+ | 8 GB+ | SAS, 200 GB+ | Gigabit network card | 1 | +| TiKV | 8 core+ | 32 GB+ | SAS, 200 GB+ | Gigabit network card | 3 | + +{{< info >}} +- In the test environment, the TiKV and PD instances can be deployed on the same server. +- For performance-related test, do not use low-performance storage and network hardware configuration, in order to guarantee the correctness of the test result. +- For the TiKV server, it is recommended to use NVMe SSDs to ensure faster reads and writes. +{{< /info >}} + +### Production environment + +| Component | CPU | Memory | Hard Disk Type | Network | Instance Number (Minimum Requirement) | +|:---------:|:--------:|:------:|:--------------:|:-------------------------------------:|:-------------------------------------:| +| PD | 4 core+ | 8 GB+ | SSD | 10 Gigabit network card (2 preferred) | 3 | +| TiKV | 16 core+ | 32 GB+ | SSD | 10 Gigabit network card (2 preferred) | 3 | + +{{< info >}} +- It is strongly recommended to use higher configuration in the production environment. +- It is recommended to keep the size of TiKV hard disk within 2 TB if you are using PCIe SSDs or within 1.5 TB if you are using regular SSDs. +{{< /info >}} + + +## Network requirements + +TiKV uses the following network ports, and their default port numbers are listed below. Based on the actual environments, you can change the port number in the configuration. + +| Component | Default Port | Description | +|:-----------------:|:------------:|:--------------------------------------------------------------------------------------- | +| TiKV | 20160 | the TiKV communication port | +| TiKV | 20180 | the port for fetching statistics, used by Prometheus | +| PD | 2379 | the client port, entrance for the clients to connect TiKV cluster | +| PD | 2380 | the inter-node communication port within the PD cluster | +| Prometheus | 9090 | the communication port for the Prometheus service | +| Node_exporter | 9100 | the communication port to report the system information of every TiKV cluster node | +| Blackbox_exporter | 9115 | the Blackbox_exporter communication port, used to monitor the ports in the TiKV cluster | +| Grafana | 3000 | the port for the external Web monitoring service and client (Browser) access | + +To ensure correct configuration, create echo servers on the ports/IP addresses by using `ncat` (from the `nmap` package): + +```bash +ncat -l $PORT -k -c 'xargs -n1 echo' +``` + +Then, from the other machines, verify that the echo server is reachable with `curl $IP:$PORT`. + +## Web browser requirements + +TiKV relies on [Grafana](https://grafana.com/) to provide visualization of database metrics. 
A recent version of Internet Explorer, Chrome or Firefox with Javascript enabled is sufficient. diff --git a/content/docs/7.1/deploy/install/production.md b/content/docs/7.1/deploy/install/production.md new file mode 100644 index 00000000..eda4d099 --- /dev/null +++ b/content/docs/7.1/deploy/install/production.md @@ -0,0 +1,167 @@ +--- +title: Production Deployment +description: Deploy a TiKV Cluster for Production Using TiUP +menu: + "7.1": + parent: Install TiKV-7.1 + weight: 2 + identifier: Production Deployment-7.1 +--- + +This guide describes how to install and deploy TiKV for production environment. + +[TiUP](https://github.com/pingcap/tiup) is a cluster operation and maintenance tool. TiUP provides [TiUP cluster](https://github.com/pingcap/tiup/tree/master/components/cluster), a cluster management component written in Golang. By using TiUP cluster, you can easily do daily operations, including deploying, starting, stopping, destroying, scaling, and upgrading a TiKV cluster, and manage cluster parameters. + +## Step 1: Install TiUP on the control machine + +Log in to the control machine using a regular user account (take the `tikv` user as an example). All the following TiUP installation and cluster management operations can be performed by the `tikv` user. + +1. Install TiUP by executing the following command: + + ```bash + curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh + ``` + +2. Set the TiUP environment variables: + + Redeclare the global environment variables: + + ```bash + source .bash_profile + ``` + + Confirm whether TiUP is installed: + + ```bash + tiup + ``` + +3. Install the TiUP cluster component: + + ```bash + tiup cluster + ``` + +4. If TiUP has been already installed, update the TiUP cluster component to the latest version: + + ```bash + tiup update --self && tiup update cluster + ``` + +5. Verify the current version of your TiUP cluster: + + ```bash + tiup --binary cluster + ``` + +## Step 2: Initialize cluster topology file + +According to the intended cluster topology, you need to manually create and edit the cluster initialization configuration file. + +To create the cluster initialization configuration file, you can create a YAML-formatted configuration file on the control machine using TiUP: + +```bash +tiup cluster template > topology.yaml +``` + +Execute `vi topology.yaml` to edit the configuration file content: + +```yaml +global: + user: "tikv" + ssh_port: 22 + deploy_dir: "/tikv-deploy" + data_dir: "/tikv-data" +server_configs: {} +pd_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +monitoring_servers: + - host: 10.0.1.7 +grafana_servers: + - host: 10.0.1.7 +``` + +{{< info >}} +- For parameters that should be globally effective, configure these parameters of corresponding components in the `server_configs` section of the configuration file. +- For parameters that should be effective on a specific node, configure these parameters in the `config` of this node. +- Use `.` to indicate the subcategory of the configuration, such as `storage.scheduler-concurrency`. For more formats, see [TiUP configuration template](https://github.com/pingcap/tiup/blob/master/embed/templates/examples/topology.example.yaml). 
+- For more parameter description, see [TiKV config.toml.example](https://github.com/tikv/tikv/blob/release-5.0/etc/config-template.toml), [PD config.toml.example](https://github.com/tikv/pd/blob/release-5.0/conf/config.toml) configuration. +{{< /info >}} + +## Step 3: Execute the deployment command + +{{< info >}} +You can use secret keys or interactive passwords for security authentication when you deploy TiKV using TiUP: + +- If you use secret keys, you can specify the path of the keys through `-i` or `--identity_file`; +- If you use passwords, add the `-p` flag to enter the password interaction window; +- If password-free login to the target machine has been configured, no authentication is required. + +In general, TiUP creates the user and group specified in the `topology.yaml` file on the target machine, with the following exceptions: + +- The user name configured in `topology.yaml` already exists on the target machine. +- You have used the `--skip-create-user` option in the command line to explicitly skip the step of creating the user. +{{< /info >}} + +Before you execute the `deploy` command, use the `check` and `check --apply` commands to detect and automatically repair the potential risks in the cluster: + +```bash +tiup cluster check ./topology.yaml --user root [-p] [-i /home/root/.ssh/gcp_rsa] +tiup cluster check ./topology.yaml --apply --user root [-p] [-i /home/root/.ssh/gcp_rsa] +``` + +Then execute the `deploy` command to deploy the TiKV cluster: + +```shell +tiup cluster deploy tikv-test v5.0.1 ./topology.yaml --user root [-p] [-i /home/root/.ssh/gcp_rsa] +``` + +In the above command: + +- The name of the deployed TiKV cluster is `tikv-test`. +- You can see the latest supported versions by running `tiup list tikv`. This document takes `v5.0.1` as an example. +- The initialization configuration file is `topology.yaml`. +- `--user root`: Log in to the target machine through the `root` key to complete the cluster deployment, or you can use other users with `ssh` and `sudo` privileges to complete the deployment. +- `[-i]` and `[-p]`: optional. If you have configured login to the target machine without password, these parameters are not required. If not, choose one of the two parameters. `[-i]` is the private key of the `root` user (or other users specified by `--user`) that has access to the target machine. `[-p]` is used to input the user password interactively. +- If you need to specify the user group name to be created on the target machine, see [this example](https://github.com/pingcap/tiup/blob/master/embed/templates/examples/topology.example.yaml#L7). + +At the end of the output log, you will see ```Deployed cluster `tikv-test` successfully```. This indicates that the deployment is successful. + +## Step 4: Check the clusters managed by TiUP + +```bash +tiup cluster list +``` + +TiUP supports managing multiple TiKV clusters. The command above outputs information of all the clusters currently managed by TiUP, including the name, deployment user, version, and secret key information. + +## Step 5: Check the status of the deployed TiKV cluster + +For example, execute the following command to check the status of the `tikv-test` cluster: + +```bash +tiup cluster display tikv-test +``` + +Expected output includes the instance ID, role, host, listening port, and status (because the cluster is not started yet, so the status is `Down`/`inactive`), and directory information. 
+ +## Step 6: Start the TiKV cluster + +```shell +tiup cluster start tikv-test +``` + +If the output log includes ```Started cluster `tikv-test` successfully```, the deployment is successful. + +## Step 7: Verify the running status of the TiKV cluster +For the specific operations, see [Verify Cluster Status](../verify). + +{{< info >}} +Please refer to [TiUP cluster document](https://docs.pingcap.com/tidb/stable/tiup-cluster) to find more TiUP cluster commands and usages. +{{< /info >}} diff --git a/content/docs/7.1/deploy/install/test.md b/content/docs/7.1/deploy/install/test.md new file mode 100644 index 00000000..41f2ab37 --- /dev/null +++ b/content/docs/7.1/deploy/install/test.md @@ -0,0 +1,216 @@ +--- +title: Test Deployment +description: Deploy a TiKV Cluster for Test +menu: + "7.1": + parent: Install TiKV-7.1 + weight: 4 + identifier: Test Deployment-7.1 +--- +This guide describes how to install and deploy TiKV for test using TiUP playground and binary installation. + +## TiUP Playground + +This chapter describes how to deploy a TiKV cluster using TiUP Playground. + +1. Install TiUP by executing the following command: + + ```bash + curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh + ``` + +2. Set the TiUP environment variables: + + Redeclare the global environment variables: + + ```bash + source .bash_profile + ``` + + Confirm whether TiUP is installed: + + ```bash + tiup + ``` + +3. If TiUP has been already installed, update the TiUP playground component to the latest version: + + ```bash + tiup update --self && tiup update playground + ``` + +4. Use TiUP playground to start a local TiKV cluster + + Show TiUP version: + + ```bash + tiup -v + ``` + + version >= 1.5.2: + + ```bash + tiup playground --mode tikv-slim + ``` + + version < 1.5.2: + + ```bash + tiup playground + ``` + +5. Press `Ctrl + C` to stop the local TiKV cluster + +{{< info >}} +Refer to [TiUP playground document](https://docs.pingcap.com/tidb/stable/tiup-playground) to find more TiUP playground commands. +{{< /info >}} + +## Install binary manually + +This chapter describes how to deploy a TiKV cluster using binary files. + +- To quickly understand and try TiKV, see [Deploy the TiKV cluster on a single machine](#deploy-the-tikv-cluster-on-a-single-machine). +- To try TiKV out and explore the features, see [Deploy the TiKV cluster on multiple nodes for testing](#deploy-the-tikv-cluster-on-multiple-nodes-for-testing). + +{{< warning >}} +The TiKV team strongly recommends you use the [**TiUP Cluster Deployment**](../production/) method. + +Other methods are documented for informational purposes. +{{< /warning >}} + +### Deploy the TiKV cluster on a single machine + +This section describes how to deploy TiKV on a single machine (Linux for example). Take the following steps: + +1. Download the official binary package. + + ```bash + # Download the package. + wget https://download.pingcap.org/tidb-latest-linux-amd64.tar.gz + wget http://download.pingcap.org/tidb-latest-linux-amd64.sha256 + + # Check the file integrity. If the result is OK, the file is correct. + sha256sum -c tidb-latest-linux-amd64.sha256 + + # Extract the package. + tar -xzf tidb-latest-linux-amd64.tar.gz + cd tidb-latest-linux-amd64 + ``` + +2. Start PD. + + ```bash + ./bin/pd-server --name=pd1 \ + --data-dir=pd1 \ + --client-urls="http://127.0.0.1:2379" \ + --peer-urls="http://127.0.0.1:2380" \ + --initial-cluster="pd1=http://127.0.0.1:2380" \ + --log-file=pd1.log + ``` + +3. Start TiKV. 
+ + To start the 3 TiKV instances, open a new terminal tab or window, come to the `tidb-latest-linux-amd64` directory, and start the instances using the following command: + + ```bash + ./bin/tikv-server --pd-endpoints="127.0.0.1:2379" \ + --addr="127.0.0.1:20160" \ + --data-dir=tikv1 \ + --log-file=tikv1.log + + ./bin/tikv-server --pd-endpoints="127.0.0.1:2379" \ + --addr="127.0.0.1:20161" \ + --data-dir=tikv2 \ + --log-file=tikv2.log + + ./bin/tikv-server --pd-endpoints="127.0.0.1:2379" \ + --addr="127.0.0.1:20162" \ + --data-dir=tikv3 \ + --log-file=tikv3.log + ``` + +You can use the [pd-ctl](https://github.com/pingcap/pd/tree/master/tools/pd-ctl) tool to verify whether PD and TiKV are successfully deployed: + +```bash +./bin/pd-ctl store -d -u http://127.0.0.1:2379 +``` + +If the state of all the TiKV instances is "Up", you have successfully deployed a TiKV cluster. + +### Deploy the TiKV cluster on multiple nodes for testing + +This section describes how to deploy TiKV on multiple nodes. If you want to test TiKV with a limited number of nodes, you can use one PD instance to test the entire cluster. + +Assume that you have four nodes, you can deploy 1 PD instance and 3 TiKV instances. For details, see the following table: + +| Name | Host IP | Services | +|:----- |:--------------- |:-------- | +| Node1 | 192.168.199.113 | PD1 | +| Node2 | 192.168.199.114 | TiKV1 | +| Node3 | 192.168.199.115 | TiKV2 | +| Node4 | 192.168.199.116 | TiKV3 | + +To deploy a TiKV cluster with multiple nodes for test, take the following steps: + +1. Download the official binary package on each node. + + ```bash + # Download the package. + wget https://download.pingcap.org/tidb-latest-linux-amd64.tar.gz + wget http://download.pingcap.org/tidb-latest-linux-amd64.sha256 + + # Check the file integrity. If the result is OK, the file is correct. + sha256sum -c tidb-latest-linux-amd64.sha256 + + # Extract the package. + tar -xzf tidb-latest-linux-amd64.tar.gz + cd tidb-latest-linux-amd64 + ``` + +2. Start PD on Node1. + + ```bash + ./bin/pd-server --name=pd1 \ + --data-dir=pd1 \ + --client-urls="http://192.168.199.113:2379" \ + --peer-urls="http://192.168.199.113:2380" \ + --initial-cluster="pd1=http://192.168.199.113:2380" \ + --log-file=pd1.log + ``` + +3. Log in and start TiKV on other nodes: Node2, Node3 and Node4. + + Node2: + + ```bash + ./bin/tikv-server --pd-endpoints="192.168.199.113:2379" \ + --addr="192.168.199.114:20160" \ + --data-dir=tikv1 \ + --log-file=tikv1.log + ``` + + Node3: + + ```bash + ./bin/tikv-server --pd-endpoints="192.168.199.113:2379" \ + --addr="192.168.199.115:20160" \ + --data-dir=tikv2 \ + --log-file=tikv2.log + ``` + + Node4: + + ```bash + ./bin/tikv-server --pd-endpoints="192.168.199.113:2379" \ + --addr="192.168.199.116:20160" \ + --data-dir=tikv3 \ + --log-file=tikv3.log + ``` + +You can use the [pd-ctl](https://github.com/pingcap/pd/tree/master/tools/pd-ctl) tool to verify whether PD and TiKV are successfully deployed: + +``` +./pd-ctl store -d -u http://192.168.199.113:2379 +``` + +The result displays the store count and detailed information regarding each store. If the state of all the TiKV instances is "Up", you have successfully deployed a TiKV cluster. 
diff --git a/content/docs/7.1/deploy/install/verify.md b/content/docs/7.1/deploy/install/verify.md new file mode 100644 index 00000000..01c7e6d0 --- /dev/null +++ b/content/docs/7.1/deploy/install/verify.md @@ -0,0 +1,75 @@ +--- +title: Verify Cluster Status +description: Check the cluster status and connect to the cluster +menu: + "7.1": + parent: Install TiKV-7.1 + weight: 3 + identifier: Verify Cluster Status-7.1 +--- + +After a TiKV cluster is deployed, you need to check whether the cluster runs normally. This document introduces how to check the cluster status using TiUP commands and Grafana, and how to connect to the TiKV cluster using a TiKV client to perform the simple `put` and `get` operations. + +## Check the TiKV cluster status + +This section describes how to check the TiKV cluster status using TiUP commands and Grafana. + +### Use TiUP + +Use the `tiup cluster display <cluster-name>` command to check the cluster status. For example: + +```bash +tiup cluster display tikv-test +``` + +Expected output: If the `Status` information of each node is `Up`, the cluster runs normally. + +### Use Grafana + +1. Log in to the Grafana monitoring dashboard at `${Grafana-ip}:3000`. The default username and password are both `admin`. + +2. Click **Overview** and check the TiKV port status and the load monitoring information. + +## Connect to the TiKV cluster and perform simple operations + +This section describes how to connect to the TiKV cluster using a TiKV client to perform the simple `put` and `get` operations. + +1. Download the jars + + ```bash + curl -o tikv-client-java.jar https://download.pingcap.org/tikv-client-java-3.1.0-SNAPSHOT.jar + curl -o slf4j-api.jar https://repo1.maven.org/maven2/org/slf4j/slf4j-api/1.7.16/slf4j-api-1.7.16.jar + ``` + +2. Install `jshell` (included in JDK 9 or later) + +3. Try the `put` and `get` operations + + To connect to the TiKV cluster and use the `put` and `get` RawKV API, save the following script to the file `verify_tikv.java`. + + + ```java + import java.util.*; + import org.tikv.common.TiConfiguration; + import org.tikv.common.TiSession; + import org.tikv.raw.RawKVClient; + import org.tikv.shade.com.google.protobuf.ByteString; + + TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379"); + TiSession session = TiSession.create(conf); + RawKVClient client = session.createRawClient(); + + // put + client.put(ByteString.copyFromUtf8("key"), ByteString.copyFromUtf8("Hello, World!")); + + // get + System.out.println(client.get(ByteString.copyFromUtf8("key")).toStringUtf8()); + ``` + +4. Run the test script + + ```bash + jshell --class-path tikv-client-java.jar:slf4j-api.jar --startup verify_tikv.java + + Hello, World! + ``` diff --git a/content/docs/7.1/deploy/monitor/alert.md b/content/docs/7.1/deploy/monitor/alert.md new file mode 100644 index 00000000..29ffe690 --- /dev/null +++ b/content/docs/7.1/deploy/monitor/alert.md @@ -0,0 +1,861 @@ +--- +title: TiKV Cluster Alert Rules +description: Learn the alert rules in a TiKV cluster. +menu: + "7.1": + parent: Monitor and Alert-7.1 + weight: 6 + identifier: TiKV Cluster Alert Rules-7.1 +--- + +This document describes the alert rules for different components in a TiKV cluster, including the rule descriptions and solutions of the alert items in TiKV, PD, Node_exporter and Blackbox_exporter. + +Alert rules are divided into three categories according to the severity level (from high to low): emergency-level, critical-level, and warning-level. This division of severity levels applies to all alert items of each component below.
+ +| Severity level | Description | +|:--------------- |:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Emergency-level | The highest severity level at which the service is unavailable. Emergency-level alerts are often caused by a service or node failure. **Manual intervention is required immediately**. | +| Critical-level | Decreased service availability. For the critical-level alerts, a close watch on the abnormal metrics is required. | +| Warning-level | Warning-level alerts are a reminder for an issue or error. | + + +Download [TiKV Alert Rule configuration file](https://github.com/tikv/tikv/tree/release-5.0/metrics/alertmanager) and [PD Alter Rule configuration file](https://github.com/tikv/pd/tree/release-5.0/metrics/alertmanager). + +## PD alert rules + +This section gives the alert rules for the PD component. + +### Emergency-level alerts + +#### `PD_cluster_offline_tikv_nums` + +* Alert rule: + + `sum(pd_cluster_status{type="store_down_count"}) > 0` + +* Description: + + PD has not received a TiKV heartbeat for a long time (the default configuration is 30 minutes). + +* Solution: + + * Check whether the TiKV process is normal, the network is isolated or the load is too high, and recover the service as much as possible. + * If the TiKV instance cannot be recovered, you can make it offline. + +### Critical-level alerts + +#### `PD_etcd_write_disk_latency` + +* Alert rule: + + `histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[1m])) by (instance,job,le)) > 1` + +* Description: + + etcd writes data to disk at a lower speed than normal. It might lead to PD leader timeout or failure to store TSO on disk in time, which will shut down the service of the entire cluster. + +* Solution: + + * Find the cause of slow writes. It might be other services that overload the system. You can check whether PD itself occupies a large amount of CPU or I/O resources. + * Try to restart PD or manually transfer leader to another PD to recover the service. + * If the problematic PD instance cannot be recovered due to environmental factors, make it offline and replace it. + +#### `PD_miss_peer_region_count` + +* Alert rule: + + `sum(pd_regions_status{type="miss_peer_region_count"}) > 100` + +* Description: + + The number of Region replicas is smaller than the value of `max-replicas`. When a TiKV machine is down and its downtime exceeds `max-down-time`, it usually leads to missing replicas for some Regions during a period of time. When a TiKV node is made offline, it might result in a small number of Regions with missing replicas. + +* Solution: + + * Find the cause of the issue by checking whether there is any TiKV machine that is down or being made offline. + * Watch the Region health panel and see whether `miss_peer_region_count` is continuously decreasing. + +### Warning-level alerts + +#### `PD_cluster_lost_connect_tikv_nums` + +* Alert rule: + + `sum(pd_cluster_status{type="store_disconnected_count"}) > 0` + +* Description: + + PD does not receive a TiKV heartbeat within 20 seconds. Normally a TiKV heartbeat comes in every 10 seconds. + +* Solution: + + * Check whether the TiKV instance is being restarted. + * Check whether the TiKV process is normal, the network is isolated, and the load is too high, and recover the service as much as possible. + * If you confirm that the TiKV instance cannot be recovered, you can make it offline. 
+ * If you confirm that the TiKV instance can be recovered, but not in the short term, you can consider increasing the value of `max-down-time`. It will prevent the TiKV instance from being considered as irrecoverable and the data from being removed from the TiKV. + +#### `PD_cluster_low_space` + +* Alert rule: + + `sum(pd_cluster_status{type="store_low_space_count"}) > 0` + +* Description: + + Indicates that there is no sufficient space on the TiKV node. + +* Solution: + + * Check whether the space in the cluster is generally insufficient. If so, increase its capacity. + * Check whether there is any issue with Region balance scheduling. If so, it will lead to uneven data distribution. + * Check whether there is any file that occupies a large amount of disk space, such as the log, snapshot, core dump, etc. + * Lower the Region weight of the node to reduce the data volume. + * When it is not possible to release the space, consider proactively making the node offline. This prevents insufficient disk space that leads to downtime. + +#### `PD_etcd_network_peer_latency` + +* Alert rule: + + `histogram_quantile(0.99, sum(rate(etcd_network_peer_round_trip_time_seconds_bucket[1m])) by (To,instance,job,le)) > 1` + +* Description: + + The network latency between PD nodes is high. It might lead to the leader timeout and TSO disk storage timeout, which impacts the service of the cluster. + +* Solution: + + * Check the network and system load status. + * If the problematic PD instance cannot be recovered due to environmental factors, make it offline and replace it. + +#### `PD_tidb_handle_requests_duration` + +* Alert rule: + + `histogram_quantile(0.99, sum(rate(pd_client_request_handle_requests_duration_seconds_bucket{type="tso"}[1m])) by (instance,job,le)) > 0.1` + +* Description: + + It takes a longer time for PD to handle the TSO request. It is often caused by a high load. + +* Solution: + + * Check the load status of the server. + * Use pprof to analyze the CPU profile of PD. + * Manually switch the PD leader. + * If the problematic PD instance cannot be recovered due to environmental factors, make it offline and replace it. + +#### `PD_down_peer_region_nums` + +* Alert rule: + + `sum(pd_regions_status{type="down_peer_region_count"}) > 0` + +* Description: + + The number of Regions with an unresponsive peer reported by the Raft leader. + +* Solution: + + * Check whether there is any TiKV that is down, or that was just restarted, or that is busy. + * Watch the Region health panel and see whether `down_peer_region_count` is continuously decreasing. + * Check the network between TiKV servers. + +#### `PD_pending_peer_region_count` + +* Alert rule: + + `sum(pd_regions_status{type="pending_peer_region_count"}) > 100` + +* Description: + + There are too many Regions that have lagged Raft logs. It is normal that scheduling leads to a small number of pending peers, but if the number remains high, there might be an issue. + +* Solution: + + * Watch the Region health panel and see whether `pending_peer_region_count` is continuously decreasing. + * Check the network between TiKV servers, especially whether there is enough bandwidth. + +#### `PD_leader_change` + +* Alert rule: + + `count(changes(pd_server_tso{type="save"}[10m]) > 0) >= 2` + +* Description: + + The PD leader is recently switched. + +* Solution: + + * Exclude the human factors, such as restarting PD, manually transferring leader, adjusting leader priority, etc. + * Check the network and system load status. 
+ * If the problematic PD instance cannot be recovered due to environmental factors, make it offline and replace it. + +#### `TiKV_space_used_more_than_80%` + +* Alert rule: + + `sum(pd_cluster_status{type="storage_size"}) / sum(pd_cluster_status{type="storage_capacity"}) * 100 > 80` + +* Description: + + Over 80% of the cluster space is occupied. + +* Solution: + + * Check whether the capacity needs to be increased. + * Check whether there is any file that occupies a large amount of disk space, such as the log, snapshot, core dump, etc. + +#### `PD_system_time_slow` + +* Alert rule: + + `changes(pd_server_tso{type="system_time_slow"}[10m]) >= 1` + +* Description: + + The system time might have been rewound. + +* Solution: + + Check whether the system time is configured correctly. + +#### `PD_no_store_for_making_replica` + +* Alert rule: + + `increase(pd_checker_event_count{type="replica_checker", name="no_target_store"}[1m]) > 0` + +* Description: + + There is no appropriate store for additional replicas. + +* Solution: + + * Check whether there is enough space in the store. + * If labels are configured, check whether there is any store that meets the label requirements for additional replicas. + +## TiKV alert rules + +This section gives the alert rules for the TiKV component. + +### Emergency-level alerts + +#### `TiKV_memory_used_too_fast` + +* Alert rule: + + `process_resident_memory_bytes{job=~"tikv",instance=~".*"} - (process_resident_memory_bytes{job=~"tikv",instance=~".*"} offset 5m) > 5*1024*1024*1024` + +* Description: + + Currently, there are no TiKV monitoring items about memory. You can monitor the memory usage of the machines in the cluster with Node_exporter. The above rule indicates that when the memory usage exceeds 5 GB within 5 minutes (the memory is occupied too fast in TiKV), an alert is triggered. + +* Solution: + + Adjust the `block-cache-size` value of both `rocksdb.defaultcf` and `rocksdb.writecf`. + +#### `TiKV_GC_can_not_work` + +* Alert rule: + + `sum(increase(tikv_gcworker_gc_tasks_vec{task="gc"}[1d])) < 1 and sum(increase(tikv_gc_compaction_filter_perform[1d])) < 1` + +* Description: + + GC is not performed successfully on a TiKV instance within 24 hours, which indicates that GC is not working properly. If GC does not run for a short time, it will not cause much trouble; but if GC stays down, more and more versions are retained, which slows down queries. + + + +### Critical-level alerts + +#### `TiKV_server_report_failure_msg_total` + +* Alert rule: + + `sum(rate(tikv_server_report_failure_msg_total{type="unreachable"}[10m])) BY (store_id) > 10` + +* Description: + + Indicates that the remote TiKV cannot be connected. + +* Solution: + + 1. Check whether the network is clear. + 2. Check whether the remote TiKV is down. + 3. If the remote TiKV is not down, check whether the pressure is too high. Refer to the solution in [`TiKV_channel_full_total`](#tikv_channel_full_total). + +#### `TiKV_channel_full_total` + +* Alert rule: + + `sum(rate(tikv_channel_full_total[10m])) BY (type, instance) > 0` + +* Description: + + This issue is often caused by the stuck Raftstore thread and high pressure on TiKV. + +* Solution: + + 1. Watch the Raft Propose monitor, and see whether the alerted TiKV node has a much higher Raft propose than other TiKV nodes. If so, it means that there are one or more hot spots on this TiKV. You need to check whether the hot spot scheduling can work properly. + 2. Watch the Raft I/O monitor, and see whether the latency increases.
If the latency is high, it means a bottleneck might exist in the disk. One feasible but unsafe solution is setting `sync-log` to `false`. + 3. Watch the Raft Process monitor, and see whether the tick duration is high. If so, you need to add `raft-base-tick-interval = "2s"` under the `[raftstore]` configuration. + +#### `TiKV_write_stall` + +* Alert rule: + + `delta(tikv_engine_write_stall[10m]) > 0` + +* Description: + + The write pressure on RocksDB is too high, and a stall occurs. + +* Solution: + + 1. View the disk monitor, and troubleshoot the disk issues; + 2. Check whether there is any write hot spot on the TiKV; + 3. Set `max-sub-compactions` to a larger value under the `[rocksdb]` and `[raftdb]` configurations. + +#### `TiKV_raft_log_lag` + +* Alert rule: + + `histogram_quantile(0.99, sum(rate(tikv_raftstore_log_lag_bucket[1m])) by (le, instance)) > 5000` + +* Description: + + If this value is relatively large, it means Follower has lagged far behind Leader, and Raft cannot be replicated normally. It is possibly because the TiKV machine where Follower is located is stuck or down. + +#### `TiKV_async_request_snapshot_duration_seconds` + +* Alert rule: + + `histogram_quantile(0.99, sum(rate(tikv_storage_engine_async_request_duration_seconds_bucket{type="snapshot"}[1m])) by (le, instance, type)) > 1` + +* Description: + + If this value is relatively large, it means the load pressure on Raftstore is too high, and it might be stuck already. + +* Solution: + + Refer to the solution in [`TiKV_channel_full_total`](#tikv_channel_full_total). + +#### `TiKV_async_request_write_duration_seconds` + +* Alert rule: + + `histogram_quantile(0.99, sum(rate(tikv_storage_engine_async_request_duration_seconds_bucket{type="write"}[1m])) by (le, instance, type)) > 1` + +* Description: + + If this value is relatively large, it means the Raft write takes a long time. + +* Solution: + + 1. Check the pressure on Raftstore. See the solution in [`TiKV_channel_full_total`](#tikv_channel_full_total). + 2. Check the pressure on the apply worker thread. + +#### `TiKV_coprocessor_request_wait_seconds` + +* Alert rule: + + `histogram_quantile(0.9999, sum(rate(tikv_coprocessor_request_wait_seconds_bucket[1m])) by (le, instance, req)) > 10` + +* Description: + + If this value is relatively large, it means the pressure on the Coprocessor worker is high. There might be a slow task that makes the Coprocessor thread stuck. + +* Solution: + + 1. View the slow query log from the TiKV client log to see whether the index or full table scan is used in a query, or see whether it is needed to analyze; + 2. Check whether there is a hot spot; + 3. View the Coprocessor monitor and see whether `total` and `process` in `coprocessor table/index scan` match. If they differ a lot, it indicates too many invalid queries are performed. You can see whether there is `over seek bound`. If so, there are too many versions that GC does not handle in time. Then you need to increase the number of parallel GC threads. + +#### `TiKV_raftstore_thread_cpu_seconds_total` + +* Alert rule: + + `sum(rate(tikv_thread_cpu_seconds_total{name=~"raftstore_.*"}[1m])) by (instance, name) > 1.6` + +* Description: + + The pressure on the Raftstore thread is too high. + +* Solution: + + Refer to the solution in [`TiKV_channel_full_total`](#tikv_channel_full_total). 
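+
+When one of the rules above fires, it can help to evaluate the same expression ad hoc and see which instance is responsible. The following is a minimal sketch, assuming a Prometheus server reachable at `PROM_ADDR` (the address and the use of `python3` for pretty-printing are assumptions); it sends the Raftstore CPU expression used by `TiKV_raftstore_thread_cpu_seconds_total` to the standard Prometheus query API:
+
+```bash
+# Evaluate an alert expression directly against the Prometheus HTTP API.
+PROM_ADDR="127.0.0.1:9090"   # assumption: replace with your Prometheus address
+
+# Per-instance Raftstore CPU usage, the same expression as the alert rule above.
+QUERY='sum(rate(tikv_thread_cpu_seconds_total{name=~"raftstore_.*"}[1m])) by (instance, name)'
+
+curl -s -G "http://${PROM_ADDR}/api/v1/query" \
+    --data-urlencode "query=${QUERY}" \
+    | python3 -m json.tool   # pretty-print the JSON result
+```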
+
+#### `TiKV_raft_append_log_duration_secs`
+
+* Alert rule:
+
+    `histogram_quantile(0.99, sum(rate(tikv_raftstore_append_log_duration_seconds_bucket[1m])) by (le, instance)) > 1`
+
+* Description:
+
+    Indicates the time cost of appending Raft logs. If it is high, it usually means I/O is too busy.
+
+#### `TiKV_raft_apply_log_duration_secs`
+
+* Alert rule:
+
+    `histogram_quantile(0.99, sum(rate(tikv_raftstore_apply_log_duration_seconds_bucket[1m])) by (le, instance)) > 1`
+
+* Description:
+
+    Indicates the time cost of applying Raft logs. If it is high, it usually means I/O is too busy.
+
+#### `TiKV_scheduler_latch_wait_duration_seconds`
+
+* Alert rule:
+
+    `histogram_quantile(0.99, sum(rate(tikv_scheduler_latch_wait_duration_seconds_bucket[1m])) by (le, instance, type)) > 1`
+
+* Description:
+
+    The time that write operations wait to obtain the memory lock (latch) in Scheduler. If it is high, there might be many write conflicts, or some conflicting operations take a long time to finish and block other operations that wait for the same lock.
+
+* Solution:
+
+    1. View the scheduler command duration in the Scheduler-All monitor and see which command is the most time-consuming;
+    2. View the scheduler scan details in the Scheduler-All monitor and see whether `total` and `process` match. If they differ a lot, there are many invalid scans. You can also see whether there is `over seek bound`. If there is too much, it indicates that GC does not work in time;
+    3. View the storage async snapshot/write duration in the Storage monitor and see whether the Raft operation is performed in time.
+
+#### `TiKV_thread_apply_worker_cpu_seconds`
+
+* Alert rule:
+
+    `sum(rate(tikv_thread_cpu_seconds_total{name="apply_worker"}[1m])) by (instance) > 1.8`
+
+* Description:
+
+    The pressure on the apply Raft log thread is too high. It is often caused by a burst of writes.
+
+### Warning-level alerts
+
+#### `TiKV_leader_drops`
+
+* Alert rule:
+
+    `delta(tikv_pd_heartbeat_tick_total{type="leader"}[30s]) < -10`
+
+* Description:
+
+    It is often caused by a stuck Raftstore thread.
+
+* Solution:
+
+    1. Refer to [`TiKV_channel_full_total`](#tikv_channel_full_total).
+    2. If there is low pressure on TiKV, consider whether the PD scheduling is too frequent. You can view the Operator Create panel on the PD page, and check the types and number of PD scheduling operations.
+
+#### `TiKV_raft_process_ready_duration_secs`
+
+* Alert rule:
+
+    `histogram_quantile(0.999, sum(rate(tikv_raftstore_raft_process_duration_secs_bucket{type='ready'}[1m])) by (le, instance, type)) > 2`
+
+* Description:
+
+    Indicates the time cost of handling Raft ready. If this value is large, it is often caused by a stuck log-appending task.
+
+#### `TiKV_raft_process_tick_duration_secs`
+
+* Alert rule:
+
+    `histogram_quantile(0.999, sum(rate(tikv_raftstore_raft_process_duration_secs_bucket{type='tick'}[1m])) by (le, instance, type)) > 2`
+
+* Description:
+
+    Indicates the time cost of handling Raft tick. If this value is large, it is often caused by too many Regions.
+
+* Solution:
+
+    1. Consider using a higher log level such as `warn` or `error`.
+    2. Add `raft-base-tick-interval = "2s"` under the `[raftstore]` configuration.
+
+#### `TiKV_scheduler_context_total`
+
+* Alert rule:
+
+    `abs(delta(tikv_scheduler_context_total[5m])) > 1000`
+
+* Description:
+
+    The number of write commands that are being executed by Scheduler. If this value is large, it means that tasks are not finished in a timely manner.
+
+* Solution:
+
+    Refer to [`TiKV_scheduler_latch_wait_duration_seconds`](#tikv_scheduler_latch_wait_duration_seconds).
+
+#### `TiKV_scheduler_command_duration_seconds`
+
+* Alert rule:
+
+    `histogram_quantile(0.99, sum(rate(tikv_scheduler_command_duration_seconds_bucket[1m])) by (le, instance, type) / 1000) > 1`
+
+* Description:
+
+    Indicates the time cost of executing a Scheduler command.
+
+* Solution:
+
+    Refer to [`TiKV_scheduler_latch_wait_duration_seconds`](#tikv_scheduler_latch_wait_duration_seconds).
+
+#### `TiKV_coprocessor_outdated_request_wait_seconds`
+
+* Alert rule:
+
+    `delta(tikv_coprocessor_outdated_request_wait_seconds_count[10m]) > 0`
+
+* Description:
+
+    The waiting time of expired Coprocessor requests. If this value is large, it means there is high pressure on the Coprocessor.
+
+* Solution:
+
+    Refer to [`TiKV_coprocessor_request_wait_seconds`](#tikv_coprocessor_request_wait_seconds).
+
+#### `TiKV_coprocessor_request_error`
+
+* Alert rule:
+
+    `increase(tikv_coprocessor_request_error{reason!="lock"}[10m]) > 100`
+
+* Description:
+
+    The request errors of Coprocessor.
+
+* Solution:
+
+    The reasons for Coprocessor errors can be divided into three types: "lock", "outdated", and "full". "outdated" indicates that the request has timed out. It might be caused by a long queue time or a long time to handle a single request. "full" indicates that the request queue is full. It is usually because the running requests are time-consuming, which causes new requests to pile up in the queue. You need to check whether the time-consuming query's execution plan is correct.
+
+#### `TiKV_coprocessor_request_lock_error`
+
+* Alert rule:
+
+    `increase(tikv_coprocessor_request_error{reason="lock"}[10m]) > 10000`
+
+* Description:
+
+    The lock errors of Coprocessor requests.
+
+* Solution:
+
+    The reasons for Coprocessor errors can be divided into three types: "lock", "outdated", and "full". "lock" indicates that the data being read is being written and you need to wait a while and read again (the retry happens automatically). If just a few errors of this kind occur, you can ignore them; but if there are a lot of them, you need to check whether there is a conflict between the writes and the queries.
+
+#### `TiKV_coprocessor_pending_request`
+
+* Alert rule:
+
+    `delta(tikv_coprocessor_pending_request[10m]) > 5000`
+
+* Description:
+
+    The number of queuing Coprocessor requests.
+
+* Solution:
+
+    Refer to [`TiKV_coprocessor_request_wait_seconds`](#tikv_coprocessor_request_wait_seconds).
+
+#### `TiKV_batch_request_snapshot_nums`
+
+* Alert rule:
+
+    `sum(rate(tikv_thread_cpu_seconds_total{name=~"cop_.*"}[1m])) by (instance) / (count(tikv_thread_cpu_seconds_total{name=~"cop_.*"}) * 0.9) / count(count(tikv_thread_cpu_seconds_total) by (instance)) > 0`
+
+* Description:
+
+    The Coprocessor CPU usage of a TiKV machine exceeds 90%.
+
+#### `TiKV_pending_task`
+
+* Alert rule:
+
+    `sum(tikv_worker_pending_task_total) BY (instance,name) > 1000`
+
+* Description:
+
+    The number of pending tasks of TiKV.
+
+* Solution:
+
+    Check which kind of task has a higher value. You can normally find a solution to the Coprocessor and apply worker tasks from other metrics.
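+
+To see which kind of task is piling up, you can break the pending-task metric down by worker name. The following is a minimal sketch, assuming the TiKV status port of the alerted instance is reachable at `TIKV_STATUS_ADDR` (the address is an assumption; 20180 is the status port used elsewhere in this documentation):
+
+```bash
+# List pending tasks per worker directly from the TiKV status port.
+TIKV_STATUS_ADDR="127.0.0.1:20180"   # assumption: point this at the alerted TiKV instance
+
+# tikv_worker_pending_task_total carries a "name" label, so the raw metrics
+# show which worker has the backlog; sort by the sample value (2nd field).
+curl -s "http://${TIKV_STATUS_ADDR}/metrics" \
+    | grep '^tikv_worker_pending_task_total' \
+    | sort -k 2 -n -r
+```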
+ +#### `TiKV_low_space_and_add_region` + +* Alert rule: + + `count((sum(tikv_store_size_bytes{type="available"}) by (instance) / sum(tikv_store_size_bytes{type="capacity"}) by (instance) < 0.2) and (sum(tikv_raftstore_snapshot_traffic_total{type="applying"}) by (instance) > 0)) > 0` + +#### `TiKV_approximate_region_size` + +* Alert rule: + + `histogram_quantile(0.99, sum(rate(tikv_raftstore_region_size_bucket[1m])) by (le)) > 1073741824` + +* Description: + + The maximum Region approximate size that is scanned by the TiKV split checker is continually larger than 1 GB within one minute. + + + +## Node_exporter host alert rules + +This section gives the alert rules for the Node_exporter host. + +### Emergency-level alerts + +#### `NODE_disk_used_more_than_80%` + +* Alert rule: + + `node_filesystem_avail_bytes{fstype=~"(ext.|xfs)", mountpoint!~"/boot"} / node_filesystem_size_bytes{fstype=~"(ext.|xfs)", mountpoint!~"/boot"} * 100 <= 20` + +* Description: + + The disk space usage of the machine exceeds 80%. + +* Solution: + + * Log in to the machine, run the `df -h` command to check the disk space usage. + * Make a plan to increase the disk capacity or delete some data or increase cluster node depending on different situations. + +#### `NODE_disk_inode_more_than_80%` + +* Alert rule: + + `node_filesystem_files_free{fstype=~"(ext.|xfs)"} / node_filesystem_files{fstype=~"(ext.|xfs)"} * 100 < 20` + +* Description: + + The inode usage of the filesystem on the machine exceeds 80%. + +* Solution: + + * Log in to the machine and run the `df -i` command to view the node usage of the filesystem. + * Make a plan to increase the disk capacity or delete some data or increase cluster node depending on different situations. + +#### `NODE_disk_readonly` + +* Alert rule: + + `node_filesystem_readonly{fstype=~"(ext.|xfs)"} == 1` + +* Description: + + The filesystem is read-only and data cannot be written in it. It is often caused by disk failure or filesystem corruption. + +* Solution: + + * Log in to the machine and create a file to test whether it is normal. + * Check whether the disk LED is normal. If not, replace the disk and repair the filesystem of the machine. + +### Critical-level alerts + +#### `NODE_memory_used_more_than_80%` + +* Alert rule: + + `(((node_memory_MemTotal_bytes-node_memory_MemFree_bytes-node_memory_Cached_bytes)/(node_memory_MemTotal_bytes)*100)) >= 80` + +* Description: + + The memory usage of the machine exceeds 80%. + +* Solution: + + * View the Memory panel of the host in the Grafana Node Exporter dashboard, and see whether Used memory is too high and Available memory is too low. + * Log in to the machine and run the `free -m` command to view the memory usage. You can run `top` to check whether there is any abnormal process that has an overly high memory usage. + +### Warning-level alerts + +#### `NODE_node_overload` + +* Alert rule: + + `(node_load5 / count without (cpu, mode) (node_cpu_seconds_total{mode="system"})) > 1` + +* Description: + + The CPU load on the machine is relatively high. + +* Solution: + + * View the CPU Usage and Load Average of the host in the Grafana Node Exporter dashboard to check whether they are too high. + * Log in to the machine and run `top` to check the load average and the CPU usage, and see whether there is any abnormal process that has an overly high CPU usage. 
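+
+The host-level checks referenced in the solutions above can be combined into a single quick pass on the alerted machine. This is only a convenience sketch that wraps the commands already mentioned in this section (`df -h`, `df -i`, `free -m`, `top`):
+
+```bash
+#!/usr/bin/env bash
+# Quick host health pass for the Node_exporter alerts above.
+echo "== Disk space =="; df -h       # NODE_disk_used_more_than_80%
+echo "== Inodes ==";     df -i       # NODE_disk_inode_more_than_80%
+echo "== Memory ==";     free -m     # NODE_memory_used_more_than_80%
+echo "== Load ==";       uptime      # NODE_node_overload
+echo "== Top processes =="
+top -b -n 1 | head -n 20             # batch mode, first 20 lines only
+```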
+ +#### `NODE_cpu_used_more_than_80%` + +* Alert rule: + + `avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by(instance) * 100 <= 20` + +* Description: + + The CPU usage of the machine exceeds 80%. + +* Solution: + + * View the CPU Usage and Load Average of the host on the Grafana Node Exporter dashboard to check whether they are too high. + * Log in to the machine and run `top` to check the Load Average and the CPU Usage, and see whether there is any abnormal process that has an overly high CPU usage. + +#### `NODE_tcp_estab_num_more_than_50000` + +* Alert rule: + + `node_netstat_Tcp_CurrEstab > 50000` + +* Description: + + There are more than 50,000 TCP links in the "establish" status on the machine. + +* Solution: + + * Log in to the machine and run `ss -s` to check the number of TCP links in the "estab" status in the current system. + * Run `netstat` to check whether there is any abnormal link. + +#### `NODE_disk_read_latency_more_than_32ms` + +* Alert rule: + + `((rate(node_disk_read_time_seconds_total{device=~".+"}[5m]) / rate(node_disk_reads_completed_total{device=~".+"}[5m])) or (irate(node_disk_read_time_seconds_total{device=~".+"}[5m]) / irate(node_disk_reads_completed_total{device=~".+"}[5m])) ) * 1000 > 32` + +* Description: + + The read latency of the disk exceeds 32 ms. + +* Solution: + + * Check the disk status by viewing the Grafana Disk Performance dashboard. + * Check the read latency of the disk by viewing the Disk Latency panel. + * Check the I/O usage by viewing the Disk I/O Utilization panel. + +#### `NODE_disk_write_latency_more_than_16ms` + +* Alert rule: + + `((rate(node_disk_write_time_seconds_total{device=~".+"}[5m]) / rate(node_disk_writes_completed_total{device=~".+"}[5m])) or (irate(node_disk_write_time_seconds_total{device=~".+"}[5m]) / irate(node_disk_writes_completed_total{device=~".+"}[5m]))) * 1000 > 16` + +* Description: + + The write latency of the disk exceeds 16ms. + +* Solution: + + * Check the disk status by viewing the Grafana Disk Performance dashboard. + * Check the write latency of the disk by viewing the Disk Latency panel. + * Check the I/O usage by viewing the Disk I/O Utilization panel. + +## Blackbox_exporter TCP, ICMP, and HTTP alert rules + +This section gives the alert rules for the Blackbox_exporter TCP, ICMP, and HTTP. + +### Emergency-level alerts + +#### `TiKV_server_is_down` + +* Alert rule: + + `probe_success{group="tikv"} == 0` + +* Description: + + Failure to probe the TiKV service port. + +* Solution: + + * Check whether the machine that provides the TiKV service is down. + * Check whether the TiKV process exists. + * Check whether the network between the monitoring machine and the TiKV machine is normal. + +#### `PD_server_is_down` + +* Alert rule: + + `probe_success{group="pd"} == 0` + +* Description: + + Failure to probe the PD service port. + +* Solution: + + * Check whether the machine that provides the PD service is down. + * Check whether the PD process exists. + * Check whether the network between the monitoring machine and the PD machine is normal. + +#### `Node_exporter_server_is_down` + +* Alert rule: + + `probe_success{group="node_exporter"} == 0` + +* Description: + + Failure to probe the Node_exporter service port. + +* Solution: + + * Check whether the machine that provides the Node_exporter service is down. + * Check whether the Node_exporter process exists. + * Check whether the network between the monitoring machine and the Node_exporter machine is normal. 
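+
+For the probe alerts above, you can reproduce the check by hand from the monitoring machine before digging further. The following is a minimal sketch; the hosts and ports are assumptions taken from the example topology used elsewhere in this documentation (TiKV 20160, PD 2379, Node_exporter 9100), so adjust them to your cluster:
+
+```bash
+# Probe service ports from the monitoring machine, similar to what
+# Blackbox_exporter's TCP probe does. Targets below are assumptions.
+for target in 192.168.199.116:20160 192.168.199.113:2379 192.168.199.113:9100; do
+    host=${target%:*}
+    port=${target#*:}
+    if nc -z -w 3 "$host" "$port"; then
+        echo "OK   $target"
+    else
+        echo "FAIL $target"
+    fi
+done
+```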
+ +#### `Blackbox_exporter_server_is_down` + +* Alert rule: + + `probe_success{group="blackbox_exporter"} == 0` + +* Description: + + Failure to probe the Blackbox_Exporter service port. + +* Solution: + + * Check whether the machine that provides the Blackbox_Exporter service is down. + * Check whether the Blackbox_Exporter process exists. + * Check whether the network between the monitoring machine and the Blackbox_Exporter machine is normal. + +#### `Grafana_server_is_down` + +* Alert rule: + + `probe_success{group="grafana"} == 0` + +* Description: + + Failure to probe the Grafana service port. + +* Solution: + + * Check whether the machine that provides the Grafana service is down. + * Check whether the Grafana process exists. + * Check whether the network between the monitoring machine and the Grafana machine is normal. + +### Warning-level alerts + +#### `BLACKER_ping_latency_more_than_1s` + +* Alert rule: + + `max_over_time(probe_duration_seconds{job=~"blackbox_exporter.*_icmp"}[1m]) > 1` + +* Description: + + The ping latency exceeds 1 second. + +* Solution: + + * View the ping latency between the two nodes on the Grafana Blackbox Exporter dashboard to check whether it is too high. + * Check the tcp panel on the Grafana Node Exporter dashboard to check whether there is any packet loss. diff --git a/content/docs/7.1/deploy/monitor/api.md b/content/docs/7.1/deploy/monitor/api.md new file mode 100644 index 00000000..4d39f238 --- /dev/null +++ b/content/docs/7.1/deploy/monitor/api.md @@ -0,0 +1,66 @@ +--- +title: Monitoring API +description: Learn the API of TiKV monitoring services. +menu: + "7.1": + parent: Monitor and Alert-7.1 + weight: 2 + identifier: Monitoring API-7.1 +--- + +You can use the following two types of interfaces to monitor the TiKV cluster state: + +- [The state interface](#use-the-state-interface): this interface uses the HTTP interface to get the component information. +- [The metrics interface](#use-the-metrics-interface): this interface uses Prometheus to record the detailed information of the various operations in components and views these metrics using Grafana. + +## Use the state interface + +The state interface monitors the basic information of a specific component in the TiKV cluster. It can also act as the monitor interface for Keepalive messages. In addition, the state interface for the Placement Driver (PD) can get the details of the entire TiKV cluster. + +### PD server + +- PD API address: `http://${host}:${port}/pd/api/v1/${api_name}` +- Default port: `2379` +- Details about API names: see [PD API doc](https://download.pingcap.com/pd-api-v1.html) + +The PD interface provides the state of all the TiKV servers and the information about load balancing. See the following example for the information about a single-node TiKV cluster: + +```bash +curl http://127.0.0.1:2379/pd/api/v1/stores +{ + "count": 1, # The number of TiKV nodes. + "stores": [ # The list of TiKV nodes. + # The details about the single TiKV node. + { + "store": { + "id": 1, + "address": "127.0.0.1:20160", + "version": "3.0.0-beta", + "state_name": "Up" + }, + "status": { + "capacity": "20 GiB", # The total capacity. + "available": "16 GiB", # The available capacity. + "leader_count": 17, + "leader_weight": 1, + "leader_score": 17, + "leader_size": 17, + "region_count": 17, + "region_weight": 1, + "region_score": 17, + "region_size": 17, + "start_ts": "2019-03-21T14:09:32+08:00", # The starting timestamp. 
+ "last_heartbeat_ts": "2019-03-21T14:14:22.961171958+08:00", # The timestamp of the last heartbeat. + "uptime": "4m50.961171958s" + } + } + ] +``` + +## Use the metrics interface + +The metrics interface monitors the state and performance of the entire TiKV cluster. + +- If you use other deployment ways, [deploy Prometheus and Grafana](../deploy) before using this interface. + +After Prometheus and Grafana are successfully deployed, [configure Grafana](../deploy#configure-grafana). diff --git a/content/docs/7.1/deploy/monitor/deploy.md b/content/docs/7.1/deploy/monitor/deploy.md new file mode 100644 index 00000000..31be401b --- /dev/null +++ b/content/docs/7.1/deploy/monitor/deploy.md @@ -0,0 +1,225 @@ +--- +title: Deploy Monitoring Services +description: Learn how to deploy monitoring services for the TiKV cluster. +menu: + "7.1": + parent: Monitor and Alert-7.1 + weight: 3 + identifier: Deploy Monitoring Services-7.1 +--- + +This document is intended for users who want to manually deploy TiKV monitoring and alert services. + +If you deploy the TiKV cluster using TiUP, the monitoring and alert services are automatically deployed, and no manual deployment is needed. + +## Deploy Prometheus and Grafana + +Assume that the TiKV cluster topology is as follows: + +| Name | Host IP | Services | +|:----- |:--------------- |:------------------------------------- | +| Node1 | 192.168.199.113 | PD1, node_export, Prometheus, Grafana | +| Node2 | 192.168.199.114 | PD2, node_export | +| Node3 | 192.168.199.115 | PD3, node_export | +| Node4 | 192.168.199.116 | TiKV1, node_export | +| Node5 | 192.168.199.117 | TiKV2, node_export | +| Node6 | 192.168.199.118 | TiKV3, node_export | + +### Step 1: Download the binary package + +```bash +# Downloads the package. +wget https://download.pingcap.org/prometheus-2.8.1.linux-amd64.tar.gz +wget https://download.pingcap.org/node_exporter-0.17.0.linux-amd64.tar.gz +wget https://download.pingcap.org/grafana-6.1.6.linux-amd64.tar.gz +``` + +```bash +# Extracts the package. +tar -xzf prometheus-2.8.1.linux-amd64.tar.gz +tar -xzf node_exporter-0.17.0.linux-amd64.tar.gz +tar -xzf grafana-6.1.6.linux-amd64.tar.gz +``` + +### Step 2: Start `node_exporter` on all nodes + +```bash +cd node_exporter-0.17.0.linux-amd64 + +# Starts the node_exporter service. +$ ./node_exporter --web.listen-address=":9100" \ + --log.level="info" & +``` + +### Step 3: Start Prometheus on Node1 + +Edit the Prometheus configuration file: + +```bash +cd prometheus-2.8.1.linux-amd64 && +vi prometheus.yml +``` + +```ini +... + +global: + scrape_interval: 15s # By default, scrape targets every 15 seconds. + evaluation_interval: 15s # By default, scrape targets every 15 seconds. + # scrape_timeout is set to the global default value (10s). + external_labels: + cluster: 'test-cluster' + monitor: "prometheus" + +scrape_configs: + - job_name: 'overwritten-nodes' + honor_labels: true # Do not overwrite job & instance labels. + static_configs: + - targets: + - '192.168.199.113:9100' + - '192.168.199.114:9100' + - '192.168.199.115:9100' + - '192.168.199.116:9100' + - '192.168.199.117:9100' + - '192.168.199.118:9100' + + - job_name: 'pd' + honor_labels: true # Do not overwrite job & instance labels. + static_configs: + - targets: + - '192.168.199.113:2379' + - '192.168.199.114:2379' + - '192.168.199.115:2379' + + - job_name: 'tikv' + honor_labels: true # Do not overwrite job & instance labels. 
+ static_configs: + - targets: + - '192.168.199.116:20180' + - '192.168.199.117:20180' + - '192.168.199.118:20180' + +... + +``` + +Start the Prometheus service: + +```bash +$ ./prometheus \ + --config.file="./prometheus.yml" \ + --web.listen-address=":9090" \ + --web.external-url="http://192.168.199.113:9090/" \ + --web.enable-admin-api \ + --log.level="info" \ + --storage.tsdb.path="./data.metrics" \ + --storage.tsdb.retention="15d" & +``` + +### Step 4: Start Grafana on Node1 + +Edit the Grafana configuration file: + +```bash +cd grafana-6.1.6 && +vi conf/grafana.ini +``` + +```init +... + +[paths] +data = ./data +logs = ./data/log +plugins = ./data/plugins +[server] +http_port = 3000 +domain = 192.168.199.113 +[database] +[session] +[analytics] +check_for_updates = true +[security] +admin_user = admin +admin_password = admin +[snapshots] +[users] +[auth.anonymous] +[auth.basic] +[auth.ldap] +[smtp] +[emails] +[log] +mode = file +[log.console] +[log.file] +level = info +format = text +[log.syslog] +[event_publisher] +[dashboards.json] +enabled = false +path = ./data/dashboards +[metrics] +[grafana_net] +url = https://grafana.net + +... + +``` + +Start the Grafana service: + +```bash +$ ./bin/grafana-server \ + --config="./conf/grafana.ini" & +``` + +## Configure Grafana + +This section describes how to configure Grafana. + +### Step 1: Add a Prometheus data source + +1. Log in to the Grafana Web interface. + + - Default address: [http://localhost:3000](http://localhost:3000) + - Default account: admin + - Default password: admin + + {{< info >}} +For the **Change Password** step, you can choose **Skip**. + {{< /info >}} + +2. In the Grafana sidebar menu, click **Data Source** within the **Configuration**. + +3. Click **Add data source**. + +4. Specify the data source information. + + - Specify a **Name** for the data source. + - For **Type**, select **Prometheus**. + - For **URL**, specify the Prometheus address. + - Specify other fields as needed. + +5. Click **Add** to save the new data source. + +### Step 2: Import a Grafana dashboard + +To import a Grafana dashboard for the PD server and the TiKV server, take the following steps respectively: + +1. Click the Grafana logo to open the sidebar menu. + +2. In the sidebar menu, click **Dashboards** -> **Import** to open the **Import Dashboard** window. + +3. Click **Upload .json File** to upload a JSON file (Download [TiKV Grafana configuration file](https://github.com/tikv/tikv/tree/release-5.0/metrics/grafana) and [PD Grafana configuration file](https://github.com/tikv/pd/tree/release-5.0/metrics/grafana)). + +4. Click **Load**. + +5. Select a Prometheus data source. + +6. Click **Import**. A Prometheus dashboard is imported. + +## View component metrics + +Click **New dashboard** in the top menu and choose the dashboard you want to view. diff --git a/content/docs/7.1/deploy/monitor/framework.md b/content/docs/7.1/deploy/monitor/framework.md new file mode 100644 index 00000000..dd2f180b --- /dev/null +++ b/content/docs/7.1/deploy/monitor/framework.md @@ -0,0 +1,28 @@ +--- +title: Monitoring Framework +description: Use Prometheus and Grafana to build the TiKV monitoring framework. +menu: + "7.1": + parent: Monitor and Alert-7.1 + weight: 1 + identifier: Monitoring Framework-7.1 +--- + +The TiKV monitoring framework adopts two open-source projects: [Prometheus](https://github.com/prometheus/prometheus) and [Grafana](https://github.com/grafana/grafana). 
TiKV uses Prometheus to store the monitoring and performance metrics, and uses Grafana to visualize these metrics. + +## About Prometheus in TiKV + +Prometheus is a time-series database and has a multi-dimensional data model and flexible query language. Prometheus consists of multiple components. Currently, TiKV uses the following components: + +- Prometheus Server: to scrape and store time series data +- Client libraries: to customize necessary metrics in the application +- AlertManager: for the alerting mechanism + +{{< figure + src="/img/docs/prometheus-in-tikv2.png" + caption="Prometheus in TiKV" + number="" >}} + +## About Grafana in TiKV + +[Grafana](https://github.com/grafana/grafana) is an open-source project for analyzing and visualizing metrics. TiKV uses Grafana to display the performance metrics. diff --git a/content/docs/7.1/deploy/monitor/grafana.md b/content/docs/7.1/deploy/monitor/grafana.md new file mode 100644 index 00000000..98715744 --- /dev/null +++ b/content/docs/7.1/deploy/monitor/grafana.md @@ -0,0 +1,47 @@ +--- +title: Export Grafana Snapshots +description: Learn how to export snapshots of Grafana Dashboard, and how to visualize these files. +menu: + "7.1": + parent: Monitor and Alert-7.1 + weight: 4 + identifier: Export Grafana Snapshots-7.1 +--- + +Metrics data is important in troubleshooting. When you request remote assistance, sometimes the support staff need to view the Grafana dashboards to diagnose problems. [MetricsTool](https://metricstool.pingcap.com/) can help export snapshots of Grafana dashboards as local files and visualize these snapshots. You can share these snapshots with outsiders and allow them to accurately read out the graphs, without giving out access to other sensitive information on the Grafana server. + +## Usage + +MetricsTool can be accessed from . It consists of three sets of tools: + +* **Export**: A user script running on the browser's Developer Tool, allowing you to download a snapshot of all visible panels in the current dashboard on any Grafana v6.x.x server. + +* **Visualize**: A web page visualizing the exported snapshot files. The visualized snapshots can be operated in the same way as live Grafana dashboards. + +* **Import**: Instructions to import the exported snapshot back into an actual Grafana instance. + +## FAQs + +### What is the advantage of this tool compared with screenshot or PDF printing? + +The snapshot files exported by MetricsTool contain the actual values when they are taken. And the Visualizer allows you to interact with the rendered graphs as if it is a live Grafana dashboard, supporting operations like toggling series, zooming into a smaller time range, and checking the precise value at a given time. This makes MetricsTool much more powerful than images or PDFs. + +### What are included in the snapshot file? + +The snapshot file contains the values of all graphs and panels in the selected time range. It does not save the original metrics from the data sources (and thus you cannot edit the query expression in the Visualizer). + +### Will the Visualizer save the uploaded snapshot files in PingCAP's servers? + +No, the Visualizer parses the snapshot files entirely inside your browser. Nothing will be sent to PingCAP. You are free to view snapshot files received from sensitive sources, and no need to worry about these leaking to third parties through the Visualizer. + +### Can it export metrics besides Grafana? + +No, we only support Grafana v6.x.x at the moment. 
+ +### Will there be problems to execute the script before all metrics are loaded? + +No, the script UI will notify you to wait for all metrics to be loaded. However, you can manually skip waiting and export the snapshot in case of some metrics loading for too long. + +### Can we share a link to a visualized snapshot? + +No, but you can share the snapshot file, with instruction on how to use the Visualizer to view it. If you truly need a world-readable URL, you may also try the public `snapshot.raintank.io` service built into Grafana, but make sure all privacy concerns are cleared before doing so. diff --git a/content/docs/7.1/deploy/monitor/key-metrics.md b/content/docs/7.1/deploy/monitor/key-metrics.md new file mode 100644 index 00000000..292065ac --- /dev/null +++ b/content/docs/7.1/deploy/monitor/key-metrics.md @@ -0,0 +1,117 @@ +--- +title: Key Metrics +description: Learn some key metrics displayed on the Grafana Overview dashboard. +menu: + "7.1": + parent: Monitor and Alert-7.1 + weight: 5 + identifier: Key Metrics-7.1 +--- + +If your TiKV cluster is deployed using TiUP, the monitoring system is deployed at the same time. For more details, see [Overview of the TiKV Monitoring Framework](https://pingcap.com/docs/stable/reference/key-monitoring-metrics/overview-dashboard/). + +The Grafana dashboard is divided into a series of sub-dashboards which include Overview, PD, TiKV, and so on. You can use various metrics to diagnose the cluster. + +At the same time, you can also deploy your Grafana server to monitor the TiKV cluster, especially when you use TiKV without TiDB. This document provides a detailed description of key metrics so that you can monitor the Prometheus metrics you are interested in. + +## Key metrics description + +To understand the key metrics, check the following table: + +Service | Metric Name | Description | Normal Range +---- | ---------------- | ---------------------------------- | -------------- +Cluster | tikv_store_size_bytes | The size of storage. The metric has a `type` label (such as "capacity", "available"). | +gRPC | tikv_grpc_msg_duration_seconds | Bucketed histogram of gRPC server messages. The metric has a `type` label which represents the type of the server message. You can count the metric and calculate the QPS. | +gRPC | tikv_grpc_msg_fail_total | The total number of gRPC message handling failure. The metric has a `type` label which represents gRPC message type. | +gRPC | grpc batch size of gRPC requests | grpc batch size of gRPC requests. | +Scheduler | tikv_scheduler_too_busy_total | The total count of too busy schedulers. The metric has a `type` label which represents the scheduler type. | +Scheduler | tikv_scheduler_contex_total | The total number of pending commands. The scheduler receives commands from clients, executes them against the MVCC layer storage engine. | +Scheduler | tikv_scheduler_stage_total | Total number of commands on each stage. The metric has two labels: `type` and `stage`. `stage` represents the stage of executed commands like "read_finish", "async_snapshot_err", "snapshot", and so on. | +Scheduler | tikv_scheduler_commands_pri_total | Total count of different priority commands. The metric has a `priority` label. | +Server | tikv_server_grpc_resp_batch_size | grpc batch size of gRPC responses. | +Server | tikv_server_report_failure_msg_total | Total number of reporting failure messages. The metric has two labels: `type` and `store_id`. `type` represents the failure type, and `store_id` represents the destination peer store ID. 
| +Server | tikv_server_raft_message_flush_total | Total number of raft messages flushed immediately. | +Server | tikv_server_raft_message_recv_total | Total number of raft messages received. | +Server | tikv_region_written_keys | Histogram of written keys for regions. | +Server | tikv_server_send_snapshot_duration_seconds | Bucketed histogram of duration in which the server sends snapshots. | +Server | tikv_region_written_bytes | Histogram of bytes written for regions. | +Raft | tikv_raftstore_leader_missing | Total number of leader missed regions. | +Raft | tikv_raftstore_region_count | The number of regions collected in each TiKV node. The label `type` has `region` and `leader`. `region` represents regions collected, and `leader` represents the number of leaders in each TiKV node. | +Raft | tikv_raftstore_region_size | Bucketed histogram of approximate region size. | +Raft | tikv_raftstore_apply_log_duration_seconds | Bucketed histogram of the duration in which each peer applies log. | +Raft | tikv_raftstore_commit_log_duration_seconds | Bucketed histogram of the duration in which each peer commits logs. | +Raft | tikv_raftstore_raft_ready_handled_total | Total number of Raft ready handled. The metric has a label `type`. | +Raft | tikv_raftstore_raft_process_duration_secs | Bucketed histogram of duration in which each peer processes Raft. The metric has a label `type`. | +Raft | tikv_raftstore_event_duration | Duration of raft store events. The metric has a label `type`. | +Raft | tikv_raftstore_raft_sent_message_total | Total number of messages sent by Raft ready. The metric has a label `type`. | +Raft | tikv_raftstore_raft_dropped_message_total | Total number of messages dropped by Raft. The metric has a label `type`. | +Raft | tikv_raftstore_apply_proposal | The count of proposals sent by a region at once. | +Raft | tikv_raftstore_proposal_total | Total number of proposals made. The metric has a label `type`. | +Raft | tikv_raftstore_request_wait_time_duration_secs | Bucketed histogram of request wait time duration. | +Raft | tikv_raftstore_propose_log_size | Bucketed histogram of the size of each peer proposing log. | +Raft | tikv_raftstore_apply_wait_time_duration_secs | Bucketed histogram of apply task wait time duration. | +Raft | tikv_raftstore_admin_cmd_total | Total number of admin command processed. The metric has 2 labels `type` and `status`. | +Raft | tikv_raftstore_check_split_total | Total number of raftstore split check. The metric has a label `type`. | +Raft | tikv_raftstore_check_split_duration_seconds | Bucketed histogram of duration for the raftstore split check. | +Raft | tikv_raftstore_local_read_reject_total | Total number of rejections from the local reader. The metric has a label `reason` which represents the rejection reason. | +Raft | tikv_raftstore_snapshot_duration_seconds | Bucketed histogram of raftstore snapshot process duration. The metric has a label `type`. | +Raft | tikv_raftstore_snapshot_traffic_total | The total amount of raftstore snapshot traffic. The metric has a label `type`. | +Raft | tikv_raftstore_local_read_executed_requests | Total number of requests directly executed by local reader. | +Coprocessor | tikv_coprocessor_request_duration_seconds | Bucketed histogram of coprocessor request duration. The metric has a label `req`. | +Coprocessor | tikv_coprocessor_request_error | Total number of push down request error. The metric has a label `reason`. 
| +Coprocessor | tikv_coprocessor_scan_keys | Bucketed histogram of scan keys observed per request. The metric has a label `req` which represents the tag of requests. | +Coprocessor | tikv_coprocessor_rocksdb_perf | Total number of RocksDB internal operations from PerfContext. The metric has 2 labels `req` and `metric`. `req` represents the tag of requests and `metric` is performance metric like "block_cache_hit_count", "block_read_count", "encrypt_data_nanos", and so on. | +Coprocessor | tikv_coprocessor_executor_count | The number of various query operations. The metric has a single label `type` which represents the related query operation (for example, "limit", "top_n", and "batch_table_scan"). | +Coprocessor | tikv_coprocessor_response_bytes | Total bytes of response body. | +Storage | tikv_storage_mvcc_versions | Histogram of versions for each key. | +Storage | tikv_storage_mvcc_gc_delete_versions | Histogram of versions deleted by GC for each key. | +Storage | tikv_storage_mvcc_conflict_counter | Total number of conflict error. The metric has a label `type`. | +Storage | tikv_storage_mvcc_duplicate_cmd_counter | Total number of duplicated commands. The metric has a label `type`. | +Storage | tikv_storage_mvcc_check_txn_status | Counter of different results of `check_txn_status`. The metric has a label `type`. | +Storage | tikv_storage_command_total | Total number of commands received. The metric has a label `type`. | +Storage | tikv_storage_engine_async_request_duration_seconds | Bucketed histogram of processing successful asynchronous requests. The metric has a label `type`. | +Storage | tikv_storage_engine_async_request_total | Total number of engine asynchronous requests. The metric has 2 labels `type` and `status`. | +GC | tikv_gcworker_gc_task_fail_vec | Counter of failed GC tasks. The metric has a label `task`. | +GC | tikv_gcworker_gc_task_duration_vec | Duration of GC tasks execution. The metric has a label `task`. | +GC | tikv_gcworker_gc_keys | Counter of keys affected during GC. The metric has two labels `cf` and `tag`. | +GC | tikv_gcworker_autogc_processed_regions | Processed regions by auto GC. The metric has a label `type`. | +GC | tikv_gcworker_autogc_safe_point | Safe point used for auto GC. The metric has a label `type`. | +Snapshot | tikv_snapshot_size | Size of snapshot. | +Snapshot | tikv_snapshot_kv_count | Total number of KVs in the snapshot | +Snapshot | tikv_worker_handled_task_total | Total number of tasks handled by the worker. The metric has a label `name`. | +Snapshot | tikv_worker_pending_task_total | The number of tasks currently running by the worker or pending. The metric has a label `name`.| +Snapshot | tikv_futurepool_handled_task_total | The total number of tasks handled by `future_pool`. The metric has a label `name`. | +Snapshot | tikv_snapshot_ingest_sst_duration_seconds | Bucketed histogram of RocksDB ingestion durations | +Snapshot | tikv_futurepool_pending_task_total | Current future_pool pending + running tasks. The metric has a label `name`. | +RocksDB | tikv_engine_get_served | queries served by engine. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_write_stall | Histogram of write stall. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_size_bytes | Sizes of each column families. The metric has two labels: `db` and `type`. `db` represents which database is being counted (for example, "kv", "raft"), and `type` represents the type of column families (for example, "default", "lock", "raft", "write"). 
| +RocksDB | tikv_engine_flow_bytes | Bytes and keys of read/write. The metric has `type` label (for example, "capacity", "available"). | +RocksDB | tikv_engine_wal_file_synced | The number of times WAL sync is done. The metric has a label `db`. | +RocksDB | tikv_engine_get_micro_seconds | Histogram of time used to get micros. The metric has two labels: `db` and `type`. | +RocksDB | tikv_engine_locate | The number of calls to seek/next/prev. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_seek_micro_seconds | Histogram of seek micros. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_write_served | Write queries served by engine. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_write_micro_seconds | Histogram of write micros. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_write_wal_time_micro_seconds | Histogram of duration for write WAL micros. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_event_total | Number of engine events. The metric has 3 labels `db`, `cf` and `type`. | +RocksDB | tikv_engine_wal_file_sync_micro_seconds | Histogram of WAL file sync micros. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_sst_read_micros | Histogram of SST read micros. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_compaction_time | Histogram of compaction time. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_block_cache_size_bytes | Usage of each column families' block cache. The metric has 2 labels `db` and `cf`. | +RocksDB | tikv_engine_compaction_reason | The number of compaction reasons. The metric has 3 labels `db`, `cf` and `reason`. | +RocksDB | tikv_engine_cache_efficiency | Efficiency of RocksDB's block cache. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_memtable_efficiency | Hit and miss of memtable. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_bloom_efficiency | Efficiency of RocksDB's bloom filter. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_estimate_num_keys | Estimate num keys of each column families. The metric has 2 labels `db` and `cf`. | +RocksDB | tikv_engine_compaction_flow_bytes | Bytes of read/write during compaction | +RocksDB | tikv_engine_bytes_per_read | Histogram of bytes per read. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_read_amp_flow_bytes | Bytes of read amplification. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_bytes_per_write | tikv_engine_bytes_per_write. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_num_snapshots | Number of unreleased snapshots. The metric has a label `db`. | +RocksDB | tikv_engine_pending_compaction_bytes | Pending compaction bytes. The metric has 2 labels `db` and `cf`. | +RocksDB | tikv_engine_num_files_at_level | Number of files at each level. The metric has 3 labels `db`, `cf` and `level`. | +RocksDB | tikv_engine_compression_ratio | Compression ratio at different levels. The metric has 3 labels `db`, `cf` and `level`. | +RocksDB | tikv_engine_oldest_snapshot_duration | Oldest unreleased snapshot duration in seconds. The metric has a label `db`. | +RocksDB | tikv_engine_write_stall_reason | QPS of each reason which causes TiKV write stall. The metric has 2 labels `db` and `type`. | +RocksDB | tikv_engine_memory_bytes | Sizes of each column families. The metric has 3 labels `db`, `cf` and `type`. 
| diff --git a/content/docs/7.1/deploy/monitor/monitor.md b/content/docs/7.1/deploy/monitor/monitor.md new file mode 100644 index 00000000..9fe93c74 --- /dev/null +++ b/content/docs/7.1/deploy/monitor/monitor.md @@ -0,0 +1,18 @@ +--- +title: Monitor and Alert +description: Learn how to monitor a TiKV cluster. +menu: + "7.1": + parent: Deploy-7.1 + weight: 4 + identifier: Monitor and Alert-7.1 +--- + +This section introduces how to monitor a TiKV cluster. + +- [Monitor Framework](../framework) +- [Monitor API](../api) +- [Deploy Monitoring Services](../deploy) +- [Export Grafana Shapshots](../grafana) +- [Key Metrics](../key-metrics) +- [TiKV Cluster Alert Rules](../alert) diff --git a/content/docs/7.1/deploy/operate/maintain.md b/content/docs/7.1/deploy/operate/maintain.md new file mode 100644 index 00000000..1cbb214e --- /dev/null +++ b/content/docs/7.1/deploy/operate/maintain.md @@ -0,0 +1,211 @@ +--- +title: TiUP Common Operations +description: Learn the common operations to operate and maintain a TiKV cluster using TiUP +menu: + "7.1": + parent: Operate TiKV-7.1 + weight: 3 + identifier: TiUP Common Operations-7.1 +--- + +This document describes the following common operations when you operate and maintain a TiKV cluster using TiUP. + +- [View the cluster list](./#view-the-cluster-list) +- [Start the cluster](./#start-the-cluster) +- [View the cluster status](./#view-the-cluster-status) +- [Modify the configuration](./#modify-the-configuration) +- [Rename the cluster](./#rename-the-cluster) +- [Stop the cluster](./#stop-the-cluster) +- [Clean up cluster data](./#clean-up-cluster-data) +- [Destroy the cluster](./#destroy-the-cluster) + +## View the cluster list + +You can manage multiple TiKV clusters with the TiUP cluster component. + +To view all the deployed TiKV clusters, run the following command: + +```shell +tiup cluster list +``` + +## Start the cluster + +To start the cluster, run the following command: + +```shell +tiup cluster start ${cluster-name} +``` + +{{< info >}} +The components in the TiKV cluster are started by TiUP in the following order: + +**PD -> TiKV -> Prometheus -> Grafana -> Node Exporter -> Blackbox Exporter** +{{< /info >}} + +You can start only some of the components by adding the `-R` or `-N` parameters in the command. For example: + +- This command starts only the PD component: + + ```shell + tiup cluster start ${cluster-name} -R pd + ``` + +- This command starts only the PD components on the `1.2.3.4` and `1.2.3.5` hosts: + + ```shell + tiup cluster start ${cluster-name} -N 1.2.3.4:2379,1.2.3.5:2379 + ``` + +{{< info >}} +If you start the components with `-R` or `-N` parameters, make sure the order of components is correct. For example, start the PD component before the TiKV component. Otherwise, the start might fail. +{{< /info >}} + +## View the cluster status + +After starting the cluster, check the status of each component to ensure that they are up and running. TiUP provides a `display` command to do so, and you don't have to log in to every machine to view the component status. + +```shell +tiup cluster display ${cluster-name} +``` + +## Modify the configuration + +When the cluster is in operation, if you need to modify the parameters of a component, run the `edit-config` command. The detailed steps are as follows: + +1. Open the configuration file of the cluster in the editing mode: + + ```shell + tiup cluster edit-config ${cluster-name} + ``` + +2. 
Configure the parameters: + + - If the configuration is globally effective for a component, edit `server_configs`: + + ``` + server_configs: + tikv: + server.status-thread-pool-size: 2 + ``` + + - If the configuration takes effect on a specific node, edit the configuration in `config` of the node: + + ``` + tikv_servers: + - host: 10.0.1.11 + port: 4000 + config: + server.status-thread-pool-size: 2 + ``` + + For the parameter format, see the [TiUP parameter template](https://github.com/pingcap/tiup/blob/master/embed/templates/examples/topology.example.yaml). + + For more information on the configuration parameters of components, refer to [TiKV `config.toml.example`](https://github.com/tikv/tikv/blob/master/etc/config-template.toml), and [PD `config.toml.example`](https://github.com/tikv/pd/blob/master/conf/config.toml). + +3. Rolling update the configuration and restart the corresponding components by running the `reload` command: + + ```shell + tiup cluster reload ${cluster-name} [-N ] [-R ] + ``` + +### Example + +If you want to set the status thread pool size parameter (`status-thread-pool-size` in the [server](https://github.com/tikv/tikv/blob/master/etc/config-template.toml) module) to `2` in tikv-server, edit the configuration as follows: + +``` +server_configs: + tikv: + server.status-thread-pool-size: 2 +``` + +Then, run the `tiup cluster reload ${cluster-name} -R tikv` command to rolling restart the TiKV component. + +## Rename the cluster + +After deploying and starting the cluster, you can rename the cluster using the `tiup cluster rename` command: + +```shell +tiup cluster rename ${cluster-name} ${new-name} +``` + +{{< info >}} ++ The operation of renaming a cluster restarts the monitoring system (Prometheus and Grafana). ++ After a cluster is renamed, some panels with the old cluster name might remain on Grafana. You need to delete them manually. +{{< /info >}} + +## Stop the cluster + +To stop the cluster, run the following command: + +```shell +tiup cluster stop ${cluster-name} +``` + +{{< info >}} +The components in the TiKV cluster are stopped by TiUP in the following order: + +**Grafana -> Prometheus -> TiKV -> PD -> Node Exporter -> Blackbox Exporter** +{{< /info >}} + +Similar to the `start` command, the `stop` command supports stopping some of the components by adding the `-R` or `-N` parameters. For example: + +- This command stops only the TiKV component: + + ```shell + tiup cluster stop ${cluster-name} -R tikv + ``` + +- This command stops only the components on the `1.2.3.4` and `1.2.3.5` hosts: + + ```shell + tiup cluster stop ${cluster-name} -N 1.2.3.4:4000,1.2.3.5:4000 + ``` + +## Clean up cluster data + +The operation of cleaning up cluster data stops all the services and cleans up the data directory or/and log directory. The operation cannot be reverted, so proceed **with caution**. 
+ +- Clean up the data of all services in the cluster, but keep the logs: + + ```shell + tiup cluster clean ${cluster-name} --data + ``` + +- Clean up the logs of all services in the cluster, but keep the data: + + ```shell + tiup cluster clean ${cluster-name} --log + ``` + +- Clean up the data and logs of all services in the cluster: + + ```shell + tiup cluster clean ${cluster-name} --all + ``` + +- Clean up the logs and data of all services except Prometheus: + + ```shell + tiup cluster clean ${cluster-name} --all --ignore-role prometheus + ``` + +- Clean up the logs and data of all services except the `172.16.13.11:9000` instance: + + ```shell + tiup cluster clean ${cluster-name} --all --ignore-node 172.16.13.11:9000 + ``` + +- Clean up the logs and data of all services except the `172.16.13.12` node: + + ```shell + tiup cluster clean ${cluster-name} --all --ignore-node 172.16.13.12 + ``` + +## Destroy the cluster + +The destroy operation stops the services and clears the data directory and deployment directory. The operation cannot be reverted, so proceed **with caution**. + +```shell +tiup cluster destroy ${cluster-name} +``` diff --git a/content/docs/7.1/deploy/operate/operate.md b/content/docs/7.1/deploy/operate/operate.md new file mode 100644 index 00000000..4ea35f3a --- /dev/null +++ b/content/docs/7.1/deploy/operate/operate.md @@ -0,0 +1,15 @@ +--- +title: Operate TiKV +description: Learn how to maintain and operate a TiKV cluster. +menu: + "7.1": + parent: Deploy-7.1 + weight: 5 + identifier: Operate TiKV-7.1 +--- + +This section introduces how to maintain and operate a TiKV cluster, including the following operations: + +- [Upgrade a TiKV cluster using TiUP](../upgrade) +- [Scale out/in a TiKV cluster using TiUP](../scale) +- [Maintain a TiKV cluster using TiUP](../maintain) diff --git a/content/docs/7.1/deploy/operate/scale.md b/content/docs/7.1/deploy/operate/scale.md new file mode 100644 index 00000000..565fa012 --- /dev/null +++ b/content/docs/7.1/deploy/operate/scale.md @@ -0,0 +1,175 @@ +--- +title: Scale +description: Learn how to scale out/in TiKV using TiUP +menu: + "7.1": + parent: Operate TiKV-7.1 + weight: 2 + identifier: Scale-7.1 +--- + +The capacity of a TiKV cluster can be increased or decreased without interrupting online services. + +This document describes how to scale a TiKV or PD cluster using TiUP. + +For example, assume that the topology of the cluster is as follows: + +| Host IP | Service | +|:-------- |:------- | +| 10.0.1.1 | Monitor | +| 10.0.1.2 | PD | +| 10.0.1.3 | PD | +| 10.0.1.4 | PD | +| 10.0.1.5 | TiKV | +| 10.0.1.6 | TiKV | +| 10.0.1.7 | TiKV | + +## Scale out a TiKV cluster + +If you want to add a TiKV node to the `10.0.1.8` host, take the following steps. + +1. Configure the scale-out topology + + Put the following contents in the `scale-out-tikv.yaml` file: + + ```yaml + tikv_servers: + - host: 10.0.1.8 + ssh_port: 22 + port: 20160 + status_port: 20180 + deploy_dir: /data/deploy/install/deploy/tikv-20160 + data_dir: /data/deploy/install/data/tikv-20160 + log_dir: /data/deploy/install/log/tikv-20160 + ``` + + To view the configuration of the current cluster, run `tiup cluster edit-config `. Because the parameter configuration of `global` and `server_configs` is inherited by `scale-out-tikv.yaml` and thus also takes effect in `scale-out-tikv.yaml`. + +2. 
Run the scale-out command + + ```shell + tiup cluster scale-out scale-out-tikv.yaml + ``` + + If you see the message "Scaled cluster out successfully", it means that the scale-out operation is successfully completed. + +3. Check the cluster status + + ```shell + tiup cluster display + ``` + + Access the monitoring platform at using your browser to monitor the status of the cluster and the new node. + +After the scale-out, the cluster topology is as follows: + +| Host IP | Service | +|:-------- |:------- | +| 10.0.1.1 | Monitor | +| 10.0.1.2 | PD | +| 10.0.1.3 | PD | +| 10.0.1.4 | PD | +| 10.0.1.5 | TiKV | +| 10.0.1.6 | TiKV | +| 10.0.1.7 | TiKV | +| 10.0.1.8 | **TiKV**| + +## Scale out a PD cluster + +If you want to add a PD node to the `10.0.1.9` host, take the following steps. + +1. Configure the scale-out topology + + Put the following contents in the `scale-out-pd.yaml` file: + + ```yaml + pd_servers: + - host: 10.0.1.9 + ssh_port: 22 + client_port: 2379 + peer_port: 2380 + deploy_dir: /data/deploy/install/deploy/pd-2379 + data_dir: /data/deploy/install/data/pd-2379 + log_dir: /data/deploy/install/log/pd-2379 + ``` + + To view the configuration of the current cluster, run `tiup cluster edit-config `. Because the parameter configuration of `global` and `server_configs` is inherited by `scale-out-pd.yaml` and thus also takes effect in `scale-out-pd.yaml`. + +2. Run the scale-out command + + ```shell + tiup cluster scale-out scale-out-pd.yaml + ``` + + If you see the message "Scaled cluster out successfully", it means that the scale-out operation is successfully completed. + +3. Check the cluster status + + ```shell + tiup cluster display + ``` + + Access the monitoring platform at using your browser to monitor the status of the cluster and the new node. + +After the scale-out, the cluster topology is as follows: + +| Host IP | Service | +|:-------- |:------- | +| 10.0.1.1 | Monitor | +| 10.0.1.2 | PD | +| 10.0.1.3 | PD | +| 10.0.1.4 | PD | +| 10.0.1.5 | TiKV | +| 10.0.1.6 | TiKV | +| 10.0.1.7 | TiKV | +| 10.0.1.8 | TiKV | +| 10.0.1.9 | **PD** | + +## Scale in a TiKV cluster + +If you want to remove a TiKV node from the `10.0.1.5` host, take the following steps. + +{{< info >}} +You can take similar steps to remove a PD node. +{{< /info >}} + +1. View the node ID information: + + ```shell + tiup cluster display + ``` + +2. Run the scale-in command: + + ```shell + tiup cluster scale-in --node 10.0.1.5:20160 + ``` + + The `--node` parameter is the ID of the node to be taken offline. + + If you see the message "Scaled cluster in successfully", it means that the scale-in operation is successfully completed. + + Besides, if the status of the node to be taken offline becomes `Tombstone`, it also indicates that the scale-in operation is successfully completed because the scale-in process takes some time. + +3. Check the cluster status: + + To check the scale-in status, run the following command: + + ```shell + tiup cluster display + ``` + + Access the monitoring platform at using your browser, and view the status of the cluster. 
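+
+Because the scale-in is asynchronous, you can also confirm from PD that the removed store has finished going offline and reached the `Tombstone` state. The following is a minimal sketch that reuses the PD stores API shown in the Monitoring API page of this documentation; the PD address is an assumption, so use any PD client URL of your cluster:
+
+```bash
+# Check the state of the stores after the scale-in.
+PD_ADDR="10.0.1.2:2379"   # assumption: any PD endpoint of this cluster
+
+# Each store is listed with its address and state_name; the removed
+# 10.0.1.5 store is expected to move from "Up" to "Offline" and
+# finally to "Tombstone".
+curl -s "http://${PD_ADDR}/pd/api/v1/stores" | grep -E '"(address|state_name)"'
+```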
+ +After the scale-in, the current topology is as follows: + +| Host IP | Service | +|:-------- |:------- | +| 10.0.1.1 | Monitor | +| 10.0.1.2 | PD | +| 10.0.1.3 | PD | +| 10.0.1.4 | PD | +| 10.0.1.6 | TiKV | +| 10.0.1.7 | TiKV | +| 10.0.1.8 | TiKV | +| 10.0.1.9 | PD | diff --git a/content/docs/7.1/deploy/operate/upgrade.md b/content/docs/7.1/deploy/operate/upgrade.md new file mode 100644 index 00000000..ca164ad7 --- /dev/null +++ b/content/docs/7.1/deploy/operate/upgrade.md @@ -0,0 +1,154 @@ +--- +title: Upgrade +description: Learn how to upgrade TiKV using TiUP +menu: + "7.1": + parent: Operate TiKV-7.1 + weight: 1 + identifier: Upgrade-7.1 +--- + +This document is targeted for users who want to upgrade from TiKV 4.0 versions to TiKV 5.0 versions, or from TiKV 5.0 to a later version. + +## Preparations + +This section introduces the preparation works needed before upgrading your TiKV cluster, including upgrading TiUP and the TiUP Cluster component. + +### Step 1: Upgrade TiUP + +Before upgrading your TiKV cluster, you first need to upgrade TiUP. + +1. Upgrade the TiUP version. It is recommended that the TiUP version is `1.4.0` or later. + + ```bash + tiup update --self + tiup --version + ``` + +2. Upgrade the TiUP Cluster version. It is recommended that the TiUP Cluster version is `1.4.0` or later. + + + ```bash + tiup update cluster + tiup cluster --version + ``` + +### Step 2: Edit TiUP topology configuration file + +{{< info >}} +Skip this step if one of the following situations applies: + ++ You have not modified the configuration parameters of the original cluster. Or you have modified the configuration parameters using `tiup cluster` but no more modification is needed. ++ After the upgrade, you want to use v5.0's default parameter values for the unmodified configuration items. +{{< /info >}} + +1. Edit the topology file: + + ```bash + tiup cluster edit-config + ``` + +2. Refer to the format of [topology](https://github.com/pingcap/tiup/blob/release-1.4/embed/templates/examples/topology.example.yaml) configuration template and fill the parameters you want to modify in the `server_configs` section of the topology file. + +3. Save the changes and exit the editing mode. + +## Perform a rolling upgrade to the TiKV cluster + +This section describes how to perform a rolling upgrade to the TiKV cluster and how to verify the version after the upgrade. + +### Upgrade the TiKV cluster to a specified version + +You can upgrade your cluster in one of the two ways: online upgrade and offline upgrade. + +By default, TiUP Cluster upgrades the TiKV cluster using the online method, which means that the TiKV cluster can still provide services during the upgrade process. With the online method, the leaders are migrated one by one on each node before the upgrade and restart. Therefore, for a large-scale cluster, it takes a long time to complete the entire upgrade operation. + +If your application has a maintenance window for the database to be stopped for maintenance, you can use the offline upgrade method to quickly perform the upgrade operation. + +#### Online upgrade + +```bash +tiup cluster upgrade +``` + +For example, if you want to upgrade the cluster to v5.0.1: + +```bash +tiup cluster upgrade v5.0.1 +``` + +{{< info >}} ++ Performing a rolling upgrade to the cluster will upgrade all components one by one. During the upgrade of TiKV, all leaders in a TiKV instance are evicted before stopping the instance. The default timeout time is 5 minutes (300 seconds). 
The instance is directly stopped after this timeout time. + ++ To perform the upgrade immediately without evicting the leader, specify `--force` in the command above. This method causes performance jitter but not data loss. + ++ To keep a stable performance, make sure that all leaders in a TiKV instance are evicted before stopping the instance. You can set `--transfer-timeout` to a larger value, for example, `--transfer-timeout 3600` (unit: second). +{{< /info >}} + +#### Offline upgrade + +1. Before the offline upgrade, you first need to stop the entire cluster. + + ```bash + tiup cluster stop + ``` + +2. Use the `upgrade` command with the `--offline` option to perform the offline upgrade. + + ```bash + tiup cluster upgrade --offline + ``` + +3. After the upgrade, the cluster will not be automatically restarted. You need to use the `start` command to restart it. + + ```bash + tiup cluster start + ``` + +### Verify the cluster version + +Execute the `display` command to view the latest cluster version `TiKV Version`: + +```bash +tiup cluster display + +Cluster name: +Cluster version: v5.0.1 +``` + +## FAQ + +This section describes common problems encountered when updating the TiKV cluster using TiUP. + +### If an error occurs and the upgrade is interrupted, how to resume the upgrade after fixing this error? + +Re-execute the `tiup cluster upgrade` command to resume the upgrade. The upgrade operation restarts the nodes that have been previously upgraded. If you do not want the upgraded nodes to be restarted, use the `replay` sub-command to retry the operation: + +1. Execute `tiup cluster audit` to see the operation records: + + ```bash + tiup cluster audit + ``` + + Find the failed upgrade operation record and keep the ID of this operation record. The ID is the `` value in the next step. + +2. Execute `tiup cluster replay ` to retry the corresponding operation: + + ```bash + tiup cluster replay + ``` + +### The evict leader has waited too long during the upgrade. How to skip this step for a quick upgrade? + +You can specify `--force`. Then the processes of transferring PD leader and evicting TiKV leader are skipped during the upgrade. The cluster is directly restarted to update the version, which has a great impact on the cluster that runs online. Here is the command: + +```bash +tiup cluster upgrade --force +``` + +### How to update the version of tools such as pd-ctl after upgrading the TiKV cluster? + +You can upgrade the tool version by using TiUP to install the `ctl` component of the corresponding version: + +```bash +tiup install ctl:v5.0.0 +``` diff --git a/content/docs/7.1/deploy/performance/instructions.md b/content/docs/7.1/deploy/performance/instructions.md new file mode 100644 index 00000000..46fca349 --- /dev/null +++ b/content/docs/7.1/deploy/performance/instructions.md @@ -0,0 +1,194 @@ +--- +title: Benchmark Instructions +description: How to do a benchmark over a TiKV cluster +menu: + "7.1": + parent: Benchmark and Performance-7.1 + weight: 6 + identifier: Benchmark Instructions-7.1 +--- + +TiKV delivers predictable throughput and latency at all scales on commodity hardware. This document provides a step-by-step tutorial on performing a benchmark test using the industry-standard benchmark tool [YCSB](https://github.com/brianfrankcooper/YCSB) on TiKV. + +## Step 1. Set up the environment + +1. Prepare 1 node for the YCSB benchmark worker, 1 node for Placement Driver (PD), and 3 nodes for TiKV. 
+ + The following table shows the recommended hardware configuration: + + | **Component** | **CPU** | **Memory** | **Storage** | **Network** | **Instance** | + | ------------- | ---------------- | -------------- | -------------------- | ----------- | ------------------------------- | + | YSCB worker | 8 cores or above | 8 GB or above | No requirement | Gigabit LAN | 1 | + | PD | 4 cores or above | 8 GB or above | SAS, 200 GB+ | Gigabit LAN | 1 | + | Monitor | 4 cores or above | 8 GB or above | SAS, 200 GB or above | Gigabit LAN | 1 (can be the same as PD nodes) | + | TiKV | 8 cores or above | 32 GB or above | SSD, 200 GB or above | Gigabit LAN | 3 | + + {{< info >}} +It is recommended to use local SSDs as the store volume for the instances. Local SSDs are low-latency disks attached to each node and can maximize performance. It is not recommended to use the network-attached block storage. It is recommended to deploy TiKV on NVMe SSDs to maximize its capacity. + {{< /info >}} + +2. Prepare services for the control node and component nodes. + + For the control node, the following requirements must be met: + + | Package | Version | Note | + | :------ | :------------: | :--------------------- | + | sshpass | 1.06 or later | For remote control | + | TiUP | 0.6.2 or later | To deploy TiKV cluster | + + For the component node, the following requirements must be met: + + | Package | Version | Note | + | :------ | :-------------: | :----------------------------: | + | sshpass | 1.06 or later | For remote control | + | numa | 2.0.12 or later | The memory allocation strategy | + | tar | No requirement | For unzipping | + + For the YCSB node: + + | Package | Version | Note | + | :-------------------------------------------- | :------------: | :-----------: | + | [go-ycsb](https://github.com/pingcap/go-ycsb) | No requirement | For benchmark | + + {{< info >}} +You can install [TiUP](https://github.com/pingcap/tiup) as described in [TiKV in 5 Minutes](../../tikv-in-5-minutes). + {{< /info >}} + +## Step 2. Deploy a TiKV cluster + +1. You can use the following topology to deploy your benchmark cluster via `tiup cluster`. For more information, see [Production Deployment](../../install/production). Save the content below as `topology.yaml`: + + ```yaml + global: + user: "tikv" + ssh_port: 22 + deploy_dir: "/tikv-deploy" + data_dir: "/tikv-data" + server_configs: {} + pd_servers: + - host: 10.0.1.1 + tikv_servers: + - host: 10.0.1.2 + - host: 10.0.1.3 + - host: 10.0.1.4 + monitoring_servers: + - host: 10.0.1.5 + grafana_servers: + - host: 10.0.1.6 + ``` + +2. Deploy the cluster: + + ```sh + tiup cluster deploy [cluster-name] [cluster-version] topology.yaml + ``` + +3. Start the cluster: + + ```sh + tiup cluster start [cluster-name] + ``` + +4. You can check the cluster information: + + ```sh + tiup cluster display [cluster-name] + ``` + +## Step 3. Run a YCSB workload + +This section introduces the types of core workloads and the recommended sequence for running the workloads. Most of the content in this section comes from [Core Workloads](https://github.com/brianfrankcooper/YCSB/wiki/Core-Workloads). + +YCSB has 6 types of workloads. The main differences among each type are the portion of different operations. + +1. Workload A: `Update heavy workload` +2. Workload B: `Read mostly workload` +3. Workload C: `Read only` +4. Workload D: `Read latest workload`. In this workload, new records are inserted, and the most recently inserted records are the most popular. 
An application example is the user status updates and people want to read the latest. +5. Workload E: `Short ranges`. In this workload, short ranges of records are queried, instead of individual records. Application example: threaded conversations, where each scan is for the posts in a given thread (assumed to be clustered by thread ID). +6. Workload F: `Read-modify-write`. In this workload, the client will read a record, modify it, and write back the changes. + +All 6 workloads above have a data set which is **similar**. Workloads D and E insert records during the test run. Thus, to keep the database size consistent, the following operation sequence is recommended: + +1. Load the database, using workload A's parameter file (workloads/workloada). + + ```sh + go-ycsb load -P workloads/workloada -p ... + ``` + +2. Run workload A (using workloads/workloada) for a variety of throughputs. + + ```sh + go-ycsb run -P workloads/workloada -p ... + ``` + +3. Run workload B (using workloads/workloadb) for a variety of throughputs. +4. Run workload C (using workloads/workloadc) for a variety of throughputs. +5. Run workload F (using workloads/workloadf) for a variety of throughputs. +6. Run workload D (using workloads/workloadd) for a variety of throughputs. This workload inserts records, increasing the size of the database. +7. Delete the data in the database. You need to destroy the cluster via `tiup cluster destroy [cluster-name]` and delete the data directory of cluster. Otherwise, the remaining data of the cluster might affect the results of the following workload. +8. Start a new TiKV cluster, load a new data set using workload E's parameter file (workloads/workloade). +9. Run workload E (using workloads/workloade) for a variety of throughputs. This workload inserts records, increasing the size of the database. + +{{< info >}} +If you try to use more clients for benchmark test, see [Running a Workload in Parallel](https://github.com/brianfrankcooper/YCSB/wiki/Running-a-Workload-in-Parallel). +{{< /info >}} + +For example, you can load a workload that contains 10 million records and 30 million operations by executing the following command: + +```sh +go-ycsb load tikv -P workloads/workloada -p tikv.pd="10.0.1.1:2379" -p tikv.type="raw" -p recordcount=10000000 -p operationcount=30000000 +``` + +After the data is successfully loaded, you can launch the workload: + +```sh +go-ycsb run tikv -P workloads/workloada -p tikv.pd="10.0.1.1:2379" -p tikv.type="raw" -p recordcount=10000000 -p operationcount=30000000 +``` + +You can specify the concurrency of the workload client using `-p threadcount=`. Normally, this number should be the same as that of virtual CPU cores bound to the machine. + +## Step 4. Check the benchmark results + +While `go-ycsb` is running, the workload runtime information is output, such as the OPS and latency: + +``` +... +READ - Takes(s): 9.7, Count: 110092, OPS: 11380.1, Avg(us): 3822, Min(us): 236, Max(us): 342821, 99th(us): 51000, 99.9th(us): 59000, 99.99th(us): 339000 +UPDATE - Takes(s): 9.7, Count: 110353, OPS: 11408.8, Avg(us): 7760, Min(us): 944, Max(us): 344934, 99th(us): 59000, 99.9th(us): 65000, 99.99th(us): 339000 +READ - Takes(s): 19.7, Count: 229147, OPS: 11647.2, Avg(us): 3094, Min(us): 202, Max(us): 342821, 99th(us): 52000, 99.9th(us): 58000, 99.99th(us): 335000 +``` + +When the workload is completed, the summary of the workload is reported. 
+
+```
+Run finished, takes 4m25.292657267s
+READ - Takes(s): 265.0, Count: 4998359, OPS: 18864.7, Avg(us): 1340, Min(us): 181, Max(us): 342821, 99th(us): 11000, 99.9th(us): 51000, 99.99th(us): 73000
+UPDATE - Takes(s): 265.0, Count: 5001641, OPS: 18877.1, Avg(us): 5416, Min(us): 599, Max(us): 1231403, 99th(us): 53000, 99.9th(us): 276000, 99.99th(us): 772000
+```
+
+{{< warning >}}
+If it reports an error like `batch commands send error:EOF`, refer to [this issue](https://github.com/pingcap/go-ycsb/issues/145).
+{{< /warning >}}
+
+## Step 5. Find the maximum throughput
+
+You can find the maximum throughput of the TiKV cluster using either of the following methods:
+
++ Increase the `threadcount` of the client.
+
+    You can increase the `threadcount` to the number of virtual cores of the machine. In some cases, this is enough to reach the bottleneck of the TiKV cluster.
+
++ Increase the number of benchmark clients.
+
+    You can deploy more benchmark clients to increase the requests towards the TiKV cluster. You can launch multiple `go-ycsb` instances from different nodes simultaneously and then aggregate their results.
+
+Repeat the two methods above. When the QPS displayed on the TiKV cluster's Grafana page no longer increases, the bottleneck of the TiKV cluster is reached.
+
+## Step 6. Clean up the cluster
+
+After the benchmark test is finished, you might want to clean up the cluster. To do that, run the following command:
+
+```sh
+tiup cluster destroy [cluster-name]
+```
diff --git a/content/docs/7.1/deploy/performance/overview.md b/content/docs/7.1/deploy/performance/overview.md
new file mode 100644
index 00000000..ee10b583
--- /dev/null
+++ b/content/docs/7.1/deploy/performance/overview.md
@@ -0,0 +1,74 @@
+---
+title: Performance Overview
+description: An overview of TiKV performance
+menu:
+    "7.1":
+        parent: Benchmark and Performance-7.1
+        weight: 5
+        identifier: Performance Overview-7.1
+---
+
+
+TiKV can deliver predictable throughput and latency at all scales on commodity hardware. This document provides an overview of TiKV benchmark performance in terms of throughput and latency.
+
+To learn how to reproduce the benchmark results in this document, see [Benchmark Instructions](../instructions). If you do not achieve similar results, check whether your hardware, workload, and test design meet the requirements in this document.
+
+## Baseline
+
+The TiKV performance in this document is evaluated using [go-ycsb](https://github.com/pingcap/go-ycsb), which is the Go version of the industry-standard [Yahoo! Cloud Serving Benchmark (YCSB)](https://github.com/brianfrankcooper/YCSB).
+
+The goal of the YCSB project is to develop a framework and common set of workloads for evaluating the performance of different key-value and cloud serving stores. For more information about how YCSB is measured, see [Core Workload](https://github.com/brianfrankcooper/YCSB/wiki/Core-Workloads).
+
+## Cluster configuration
+
+To show the overall throughput and latency of TiKV, the benchmark in this document uses a 3-node TiKV cluster with different client concurrencies.
+
+The configuration of the 3-node cluster is as follows:
+
+| CPU                                                        | Memory | Disk            | Mode  |
+| ---------------------------------------------------------- | ------ | --------------- | ----- |
+| 40 virtual CPUs, Intel(R) Xeon(R) CPU E5-2630 v4 @ 2.20GHz | 64GiB  | 500GiB NVMe SSD | RawKV |
+
+In addition, a 12-pod cluster is deployed to simulate a large workload.
Each pod is allocated with 40 threads to run a YSCB workload with 10M operations over a dataset with 10M records. + +## Benchmark results + +The results show that **A 3-node TiKV cluster achieves at most 200,000 OPS within 10 ms latency in a 10M records and 10M operations YCSB workload**. + +TiKV achieves this performance in [linearizability](https://en.wikipedia.org/wiki/Linearizability), a strong correctness condition, which constrains what outputs are possible when an object is accessed by multiple processes concurrently. + +### Throughput + +On a 3-node cluster of configuration listed above, TiKV can achieve 212,000 point get read per second on the YCSB workloadc and 43,200 update per second on the YCSB workloada. With different concurrencies, the throughput changes are shown in [Figure 1](https://docs.google.com/spreadsheets/d/e/2PACX-1vTIx695jjL3qYN1iR4xC3N8qh0B1qsHOALSBqf1B469b0DIZwVdzZMcSbBOOtAIo31hAdW0x_EXjmgq/pubchart?oid=1044850259&format=interactive). + +{{< figure + src="/img/docs/ycsb-throughput.svg" + caption="YCSB throughput" + width="1000" + number="1" >}} + +### Latency + +TiKV is suitable for delay-sensitive services. Even at a high pressure throughput, the average latency is less than 10 ms, as shown in [Figure 2](https://docs.google.com/spreadsheets/d/e/2PACX-1vTIx695jjL3qYN1iR4xC3N8qh0B1qsHOALSBqf1B469b0DIZwVdzZMcSbBOOtAIo31hAdW0x_EXjmgq/pubchart?oid=334435174&format=interactive). + +{{< figure + src="/img/docs/avg-latency.svg" + caption="YCSB latency" + width="1000" + number="2" >}} + +For the 99th percentile latency, see [Figure 3](https://docs.google.com/spreadsheets/d/e/2PACX-1vTIx695jjL3qYN1iR4xC3N8qh0B1qsHOALSBqf1B469b0DIZwVdzZMcSbBOOtAIo31hAdW0x_EXjmgq/pubchart?oid=6574505&format=interactive). + +{{< figure + src="/img/docs/99-latency.svg" + caption="YCSB 99th percentile latency" + width="1000" + number="3" >}} + +## Performance limitations + +For the current TiKV release, if replication factors increase, the TiKV latency increases linearly. In addition, under heavily write workload, the write latency increases much faster than the read latency. For the next several releases, more improvements will be made to address the limitations. + +## See also + +* If you are interested in more benchmark results, see this [sheet](https://docs.google.com/spreadsheets/d/1VjzC3IxCiqGQmSUgRxewgExE3c32YiZMUKNsKDuvrPg/edit?usp=sharing). diff --git a/content/docs/7.1/deploy/performance/performance.md b/content/docs/7.1/deploy/performance/performance.md new file mode 100644 index 00000000..62662b01 --- /dev/null +++ b/content/docs/7.1/deploy/performance/performance.md @@ -0,0 +1,14 @@ +--- +title: Benchmark and Performance +description: Learn about the performance of TiKV. +menu: + "7.1": + parent: Deploy-7.1 + weight: 4 + identifier: Benchmark and Performance-7.1 +--- + +This section introduces an overview of TiKV performance and the instructions to do a benchmark. + +- [Performance Overview](../overview) +- [Benchmark Instructions](../instructions) diff --git a/content/docs/7.1/develop/clients/cpp.md b/content/docs/7.1/develop/clients/cpp.md new file mode 100644 index 00000000..24f66147 --- /dev/null +++ b/content/docs/7.1/develop/clients/cpp.md @@ -0,0 +1,17 @@ +--- +title: C++ Client +description: Interact with TiKV using C++. +menu: + "7.1": + parent: TiKV Clients-7.1 + weight: 5 + identifier: C++ Client-7.1 +--- + +{{< warning >}} +Currently, the TiKV client for C++ is not released yet. 
+{{< /warning >}} + +The TiKV client for C++ is built on top of [TiKV client for Rust](https://github.com/tikv/client-rust) using [cxx](https://github.com/dtolnay/cxx). + +This C++ client is still in the proof-of-concept stage and under development. You can track the development progress at the [tikv/client-cpp](https://github.com/tikv/client-cpp/) repository. diff --git a/content/docs/7.1/develop/clients/go.md b/content/docs/7.1/develop/clients/go.md new file mode 100644 index 00000000..deea019b --- /dev/null +++ b/content/docs/7.1/develop/clients/go.md @@ -0,0 +1,243 @@ +--- +title: Go Client +description: Interact with TiKV using Go. +menu: + "7.1": + parent: TiKV Clients-7.1 + weight: 2 + identifier: Go Client-7.1 +--- + +This document guides you on how to use [Go Client](https://github.com/tikv/client-go) through some simple examples. For more details, please visit [client-go wiki]. + +## Try the transactional key-value API + +The `txnkv` package provides a transactional API against TiKV cluster. + +### Create Client + +The topology of a TiKV cluster can be discovered by PD server. After starting a TiKV cluster successfully, we can use PD's address list to create a client to interact with it. + +```go +import "github.com/tikv/client-go/v2/txnkv" + +client, err := txnkv.NewClient([]string{"127.0.0.1:2379"}) +``` + +### Closing Client + +When you are done with a client, you need to gracefully close the client to finish pending tasks and terminate all background jobs. + +```go +// ... create a client as described above ... +// ... do something with the client ... +if err := client.Close(); err != nil { + // ... handle error ... +} +``` + +### Starting Transaction + +When using the transactional API, almost all read and write operations are done within a transaction (or a snapshot). You can use `Begin` to start a transaction. + +```go +txn, err := client.Begin(opts...) +if err != nil { + // ... handle error ... +} +``` + +### Reads + +`TxnKV` provides `Get`, `BatchGet`, `Iter` and `IterReverse` methods to query TiKV. + +`Get` retrieves a key-value record from TiKV. + +```go +import tikverr "github.com/tikv/client-go/v2/error" + +v, err := txn.Get(context.TODO(), []byte("foo")) +if tikverr.IsErrNotFound(err) { + // ... handle not found ... +} else if err != nil { + // ... handle other errors ... +} +// ... handle value v ... +``` + +When reading multiple keys from TiKV, `BatchGet` can be used. + +```go +values, err := txn.BatchGet(context.TODO(), keys) +if err != nil { + // ... handle error ... +} +for _, k := range keys { + if v, ok := values[string(k)]; ok { + // ... handle record k:v ... + } else { + // ... k does not exist ... + } +} +``` + +All key-value records are logically arranged in sorted order. The iterators allow applications to do range scans on TiKV. The iterator yields records in the range `[start, end)`. + +```go +iter, err := txn.Iter(start, end) +if err != nil { + // ... handle error ... +} +defer iter.Close() +for iter.Valid() { + k, v := iter.Key(), iter.Value() + // ... handle record k:v + if err := iter.Next(); err != nil { + // ... handle error ... + } +} +``` + +`IterReverse` also creates an iterator instance, but it iterates in reverse order. + +### Writes + +You can use `Set` and `Delete` methods to write data into the transaction. + +```go +if err := txn.Set([]byte("foo"), []byte("bar")); err != nil { + // ... handle error ... +} +if err := txn.Delete([]byte("foo")); err != nil { + // ... handle error ... 
+} +``` + +### Committing or Rolling Back Transaction + +To actually commit the transaction to TiKV, you need to call `Commit` to trigger the commit process. + +If the transaction does not need to commit, for optimistic transactions, you can just discard the transaction instance, for pessimistic transactions you need to actively call the `Rollback()` method to clean up the data previously sent to TiKV. + +```go +if err := txn.Commit(context.TODO()); err != nil { + // ... handle error ... +} +// ... commit success ... +``` + +### Snapshots (Read-Only Transactions) + +If you want to create a read-only transaction, you can use `GetSnapshot` method to create a snapshot. A `Snapshot` is more lightweight than a transaction. + +```go +ts, err := client.CurrentTimestamp("global") +if err != nil { + // ... handle error ... +} +snapshot := client.GetSnapshot(ts) +v, err := snapshot.Get(context.TODO(), []byte("foo")) +// ... handle Get result ... +``` + +Snapshot can also be extracted from an existing transaction. + +```go +snapshot := txn.GetSnapshot() +// ... use snapshot ... +``` + + +## Try the Raw key-value API + +### Create client + +After starting a TiKV cluster successfully, we can use PD's address list to create a client to interact with it. + +```go +import ( + "github.com/tikv/client-go/v2/rawkv" +) + +client, err := rawkv.NewClientWithOpts(context.TODO(), []string{"127.0.0.1:2379"}) +if err != nil { + // ... handle error ... +} +``` + +### Closing Client + +When you are done with a client, you need to gracefully close the client to finish pending tasks and terminate all background jobs. + +```go +if err := client.Close(); err != nil { + // ... handle error ... +} +``` + +### Single Key Read/Write + +`RawKV` provides `Get`, `Put` and `Delete` methods to read and write a single key. + +```go +v, err := client.Get(context.TODO(), []byte("key")) +if err != nil { + // ... handle error ... +} +if v == nil { + // ... handle not found ... +} else { + // ... handle value v ... +} + +err = client.Put(context.TODO(), []byte("key"), []byte("value")) +if err != nil { + // ... handle error ... +} + +err = client.Delete(context.TODO(), []byte("key")) +if err != nil { + // ... handle error ... +} +``` + +### Iterations +Like `txnkv`, there are also `Scan` and `ReverseScan` methods to iterate over a range of keys. + +```go +keys, values, err := client.Scan(context.TODO(), []byte("begin"), []byte("end"), 10) +if err != nil { + // ... handle error ... +} +// ... handle keys, values ... + +keys, values, err := client.ReverseScan(context.TODO(), []byte("end"), []byte("begin"), 10) +if err != nil { + // ... handle error ... +} +// ... handle keys, values ... +``` + +### Batch Operations + +`RawKV` also supports batch operations using batch. Note that since `RawKV` does not provide transaction semantic, we do not guarantee that all writes will succeed or fail at the same time when these keys are distributed across multiple regions. + +```go +values, err := client.BatchGet(context.TODO(), [][]byte{[]byte("key1"), []byte("key2")}) +if err != nil { + // ... handle error ... +} +// ... handle values ... + +err = client.BatchPut(context.TODO(), [][]byte{[]byte("key1"), []byte("key2")}, [][]byte{[]byte("value1"), []byte("value2")}) +if err != nil { + // ... handle error ... +} + +err = client.BatchDelete(context.TODO(), [][]byte{[]byte("key1"), []byte("key2")}) +if err != nil { + // ... handle error ... 
+} +``` + +[client-go wiki]: https://github.com/tikv/client-go/wiki diff --git a/content/docs/7.1/develop/clients/introduction.md b/content/docs/7.1/develop/clients/introduction.md new file mode 100644 index 00000000..895c13e0 --- /dev/null +++ b/content/docs/7.1/develop/clients/introduction.md @@ -0,0 +1,33 @@ +--- +title: TiKV Clients +description: Interact with TiKV using the raw key-value API or the transactional key-value API. +menu: + "7.1": + parent: Develop-7.1 + weight: 1 + identifier: TiKV Clients-7.1 +--- + +TiKV offers two APIs that you can interact with: + +| API | Description | Atomicity | Usage scenarios | +|:------------- |:------------------------------------------------------------------------------ |:------------- |:------------------------------------------------------------------------------------ | +| Raw | A low-level key-value API to interact directly with individual key-value pairs | Single key | Your application requires low latency and does not involve distributed transactions. | +| Transactional | A high-level key-value API to provide ACID semantics. | Multiple keys | Your application requires distributed transactions. | + +{{< warning >}} +To use both the raw and transactional APIs in the same cluster, please enable [API V2](../../../concepts/explore-tikv-features/api-v2). +{{< /warning >}} + +TiKV provides the following clients developed in different programming languages: + +| Clients | RawKV API | TxnKV API | Supported TiKV Version | +| -------------------------- | ----------------- | ----------------- | ---------------------- | +| [Java Client](../java) | (Stable) Has been used in the production environment of some commercial customers in latency sensitive systems. | (Stable) Has been used in the [TiSpark] and [TiBigData] project to integrate data from TiDB to Big Data ecosystem. [TiSpark] and [TiBigData] are used in the production system of some commercial customers and internet companies. | >= 2.0.0 | +| [Go Client](../go) | (Stable) Has been used in the production environment of some internet commercial customers, to access TiKV as feature store and other scenarios. | (Stable) Has been used as one of the fundamental library of TiDB. Has been used in production environment of some internet commercial customers to access TiKV as metadata store and other scenarios. | >= 5.0.0 | +| [Rust Client](../rust) | (Unstable) | (Unstable) | >= 5.0.0 | +| [Python Client](../python) | (Unstable) | (Unstable) | >= 5.0.0 | +| [C++ Client](../cpp) | (Unstable) | (Unstable) | >= 5.0.0 | + +[TiSpark]: https://github.com/pingcap/tispark +[TiBigData]: https://github.com/tidb-incubator/TiBigData diff --git a/content/docs/7.1/develop/clients/java.md b/content/docs/7.1/develop/clients/java.md new file mode 100644 index 00000000..ee94628d --- /dev/null +++ b/content/docs/7.1/develop/clients/java.md @@ -0,0 +1,145 @@ +--- +title: Java Client +description: Interact with TiKV using Java. +menu: + "7.1": + parent: TiKV Clients-7.1 + weight: 1 + identifier: Java Client-7.1 +--- + +This document guides you on how to use [Java Client](https://github.com/tikv/client-java) through some simple examples. For more details, please visit [TiKV Java Client User Documents]. + +{{< info >}} +TiKV Java Client is developed and released using Java8. The minimum supported version of TiKV is 2.0.0. +{{< /info >}} + +## Add the dependency + +To start, open the `pom.xml` of your project, and add the `tikv-client-java` as dependencies if you are using Maven. 
+
+```xml
+<dependency>
+  <groupId>org.tikv</groupId>
+  <artifactId>tikv-client-java</artifactId>
+  <version>3.2.0</version>
+</dependency>
+```
+
+## Try the transactional key-value API
+
+The following shows the basic usage of `TxnKV`. Data should be written into TxnKV using `TwoPhaseCommitter`, and read using `org.tikv.txn.KVClient`.
+
+```java
+import java.util.Arrays;
+import java.util.List;
+
+import org.tikv.common.BytePairWrapper;
+import org.tikv.common.ByteWrapper;
+import org.tikv.common.TiConfiguration;
+import org.tikv.common.TiSession;
+import org.tikv.common.util.BackOffer;
+import org.tikv.common.util.ConcreteBackOffer;
+import org.tikv.kvproto.Kvrpcpb.KvPair;
+import org.tikv.shade.com.google.protobuf.ByteString;
+import org.tikv.txn.KVClient;
+import org.tikv.txn.TwoPhaseCommitter;
+
+public class App {
+    public static void main(String[] args) throws Exception {
+        TiConfiguration conf = TiConfiguration.createDefault("127.0.0.1:2379");
+        try (TiSession session = TiSession.create(conf)) {
+            // two-phase write
+            long startTS = session.getTimestamp().getVersion();
+            try (TwoPhaseCommitter twoPhaseCommitter = new TwoPhaseCommitter(session, startTS)) {
+                BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(1000);
+                byte[] primaryKey = "key1".getBytes("UTF-8");
+                byte[] key2 = "key2".getBytes("UTF-8");
+
+                // first phase: prewrite
+                twoPhaseCommitter.prewritePrimaryKey(backOffer, primaryKey, "val1".getBytes("UTF-8"));
+                List<BytePairWrapper> pairs = Arrays
+                    .asList(new BytePairWrapper(key2, "val2".getBytes("UTF-8")));
+                twoPhaseCommitter.prewriteSecondaryKeys(primaryKey, pairs.iterator(), 1000);
+
+                // second phase: commit
+                long commitTS = session.getTimestamp().getVersion();
+                twoPhaseCommitter.commitPrimaryKey(backOffer, primaryKey, commitTS);
+                List<ByteWrapper> keys = Arrays.asList(new ByteWrapper(key2));
+                twoPhaseCommitter.commitSecondaryKeys(keys.iterator(), commitTS, 1000);
+            }
+
+            try (KVClient kvClient = session.createKVClient()) {
+                long version = session.getTimestamp().getVersion();
+                ByteString key1 = ByteString.copyFromUtf8("key1");
+                ByteString key2 = ByteString.copyFromUtf8("key2");
+
+                // get value of a single key
+                ByteString val = kvClient.get(key1, version);
+                System.out.println(val);
+
+                // get value of multiple keys
+                BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(1000);
+                List<KvPair> kvPairs = kvClient.batchGet(backOffer, Arrays.asList(key1, key2), version);
+                System.out.println(kvPairs);
+
+                // get value of a range of keys
+                kvPairs = kvClient.scan(key1, ByteString.copyFromUtf8("key3"), version);
+                System.out.println(kvPairs);
+            }
+        }
+    }
+}
+```
+
+## Try the Raw key-value API
+
+The following shows the basic usage of `RawKV`.
+
+```java
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import org.tikv.common.TiConfiguration;
+import org.tikv.common.TiSession;
+import org.tikv.kvproto.Kvrpcpb;
+import org.tikv.raw.RawKVClient;
+import org.tikv.shade.com.google.protobuf.ByteString;
+
+public class Main {
+    public static void main(String[] args) throws Exception {
+        // You MUST create a raw configuration if you are using RawKVClient.
+        TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379");
+        TiSession session = TiSession.create(conf);
+        RawKVClient client = session.createRawClient();
+
+        // put
+        client.put(ByteString.copyFromUtf8("k1"), ByteString.copyFromUtf8("Hello"));
+        client.put(ByteString.copyFromUtf8("k2"), ByteString.copyFromUtf8(","));
+        client.put(ByteString.copyFromUtf8("k3"), ByteString.copyFromUtf8("World"));
+        client.put(ByteString.copyFromUtf8("k4"), ByteString.copyFromUtf8("!"));
+        client.put(ByteString.copyFromUtf8("k5"), ByteString.copyFromUtf8("Raw KV"));
+
+        // get
+        Optional<ByteString> result = client.get(ByteString.copyFromUtf8("k1"));
+        System.out.println(result.get().toStringUtf8());
+
+        // batch get
+        List<Kvrpcpb.KvPair> list = client.batchGet(new ArrayList<ByteString>() {{
+            add(ByteString.copyFromUtf8("k1"));
+            add(ByteString.copyFromUtf8("k3"));
+        }});
+        System.out.println(list);
+
+        // scan
+        list = client.scan(ByteString.copyFromUtf8("k1"), ByteString.copyFromUtf8("k6"), 10);
+        System.out.println(list);
+
+        // close
+        client.close();
+        session.close();
+    }
+}
+```
+
+[TiKV Java Client User Documents]: https://tikv.github.io/client-java/introduction/introduction.html
diff --git a/content/docs/7.1/develop/clients/python.md b/content/docs/7.1/develop/clients/python.md
new file mode 100644
index 00000000..a0ab0f42
--- /dev/null
+++ b/content/docs/7.1/develop/clients/python.md
@@ -0,0 +1,17 @@
+---
+title: Python Client
+description: Interact with TiKV using Python.
+menu:
+    "7.1":
+        parent: TiKV Clients-7.1
+        weight: 4
+        identifier: Python Client-7.1
+---
+
+The TiKV client for Python is built on top of [TiKV Client in Rust](https://github.com/tikv/client-rust) via CFFI and [PyO3 Python binding](https://github.com/PyO3/pyo3).
+
+The Python client is still in the proof-of-concept stage and under development. You can track the development at the [tikv/client-py](https://github.com/tikv/client-py/) repository.
+
+{{< warning >}}
+You should not use the Python client for production use until it is released.
+{{< /warning >}}
diff --git a/content/docs/7.1/develop/clients/rust.md b/content/docs/7.1/develop/clients/rust.md
new file mode 100644
index 00000000..450650b9
--- /dev/null
+++ b/content/docs/7.1/develop/clients/rust.md
@@ -0,0 +1,147 @@
+---
+title: Rust Client
+description: Interact with TiKV using Rust.
+menu:
+    "7.1":
+        parent: TiKV Clients-7.1
+        weight: 3
+        identifier: Rust Client-7.1
+---
+
+TiKV Rust Client is still in the proof-of-concept stage and under development. You can track the development at the [tikv/client-rust](https://github.com/tikv/client-rust/) repository.
+
+{{< warning >}}
+Before TiKV Rust Client is officially released, it is not recommended to be used in a production environment.
+{{< /warning >}}
+
+This guide introduces how to interact with TiKV using [Rust Client](https://github.com/tikv/client-rust).
+
+{{< warning >}}
+The minimum supported version of Rust is 1.40. The minimum supported version of TiKV is 5.0.0.
+{{< /warning >}}
+
+## Basic data types
+
+Both RawKV API and TxnKV API use the following basic data types:
+
+* `Key`: Refers to a key in the store. `String` and `Vec<u8>` implement `Into<Key>`, so you can pass them directly into client functions.
+* `Value`: Refers to a value in the store, which is an alias of `Vec<u8>`.
+* `KvPair`: Refers to a key-value pair. It provides convenient methods for conversion to and from other types.
+* `BoundRange`: Used for range related requests like `scan`. It implements `From` for Rust ranges so you can pass a Rust range of keys to the request.
For example: `client.delete_range(vec![]..)`. + +## Add dependencies + +Before you start, you need to add the `tikv-client` as a dependency in the `Cargo.toml` file of your project. + +```toml +[dependencies] +tikv-client = "0.1.0" +``` + +## Raw key-value API + +With a connected `tikv_client::RawClient`, you can perform actions such as `put`, `get`, `delete`, and `scan`: + +```rust +let client = RawClient::new(vec!["127.0.0.1:2379"]).await?; + +let key = "Hello".to_owned(); +let value = "RawKV".to_owned(); + +// put +let result = client.put(key.to_owned(), value.to_owned()).await?; +assert_eq!(result, ()); + +// get +let result = client.get(key.to_owned()).await?; +assert_eq!(result.unwrap(), value.as_bytes()); + +// delete +let result = client.delete(key.to_owned()).await?; +assert_eq!(result, ()); + +// get +let result = client.get(key.to_owned()).await?; +assert_eq!(result, None); + +// scan +let limit = 1000; +client.put("k1".to_owned(), "v1".to_owned()).await?; +client.put("k2".to_owned(), "v2".to_owned()).await?; +client.put("k3".to_owned(), "v3".to_owned()).await?; +client.put("k4".to_owned(), "v4".to_owned()).await?; +let result = client.scan("k1".to_owned().."k5".to_owned(), limit).await?; +println!("{:?}", result); +``` + +These functions also have batch variants (`batch_put`, `batch_get`, `batch_delete` and `batch_scan`), which help to considerably reduce network overhead and greatly improve performance under certain workloads. + +You can find all the functions that `RawClient` supports in the [Raw requests table](https://github.com/tikv/client-rust#raw-requests). + +## Transactional key-value API + +With a connected `tikv_client::TransactionClient`, you can begin a transaction: + +```rust +use tikv_client::TransactionClient; + +let txn_client = TransactionClient::new(vec!["127.0.0.1:2379"]).await?; +let mut txn = txn_client.begin_optimistic().await?; +``` + +Then you can send commands such as `get`, `set`, `delete`, and `scan`: + +```rust +let key = "Hello".to_owned(); +let value = "TxnKV".to_owned(); + +// put +let mut txn = txn_client.begin_optimistic().await?; +txn.put(key.to_owned(), value.to_owned()).await?; +txn.commit().await?; + +// get +let mut txn = txn_client.begin_optimistic().await?; +let result = txn.get(key.to_owned()).await?; +txn.commit().await?; +assert_eq!(result.unwrap(), value.as_bytes()); + +// delete +let mut txn = txn_client.begin_optimistic().await?; +txn.delete(key.to_owned()).await?; +txn.commit().await?; + +// get +let mut txn = txn_client.begin_optimistic().await?; +let result = txn.get(key.to_owned()).await?; +txn.commit().await?; +assert_eq!(result, None); + +// scan +let mut txn = txn_client.begin_optimistic().await?; +txn.put("k1".to_owned(), "v1".to_owned()).await?; +txn.put("k2".to_owned(), "v2".to_owned()).await?; +txn.put("k3".to_owned(), "v3".to_owned()).await?; +txn.put("k4".to_owned(), "v4".to_owned()).await?; +txn.commit().await?; + +let limit = 1000; +let mut txn2 = txn_client.begin_optimistic().await?; +let result = txn2.scan("k1".to_owned().."k5".to_owned(), limit).await?; +result.for_each(|pair| println!("{:?}", pair)); +txn2.commit().await?; +``` + +You can commit these changes when you are ready, or roll back if you prefer to abort the operation: + +```rust +if all_is_good { + txn.commit().await?; +} else { + txn.rollback().await?; +} +``` + +These functions also have batch variants (`batch_put`, `batch_get`, `batch_delete` and `batch_scan`), which help to considerably reduce network overhead and greatly improve performance under 
certain workloads.
+
+You can find all the functions that `TransactionClient` supports in the [Transactional requests table](https://github.com/tikv/client-rust#transactional-requests).
diff --git a/content/docs/7.1/develop/develop.md b/content/docs/7.1/develop/develop.md
new file mode 100644
index 00000000..1f9f51fc
--- /dev/null
+++ b/content/docs/7.1/develop/develop.md
@@ -0,0 +1,32 @@
+---
+title: Develop
+description: Learn how to use TiKV Clients for different languages
+menu:
+    "7.1":
+        weight: 3
+        identifier: Develop-7.1
+---
+
+Learn how to use TiKV Clients for different languages.
+
+## [TiKV Clients](../clients/introduction/)
+
+TiKV provides the following clients developed in different programming languages:
+
+- [Java Client](../clients/java) is ready for production.
+- [Go Client](../clients/go) is ready for production.
+- [Rust Client](../clients/rust) is still in the proof-of-concept stage and under development.
+- [Python Client](../clients/python) is still in the proof-of-concept stage and under development.
+- [C++ Client](../clients/cpp) is still in the proof-of-concept stage and under development.
+
+## RawKV and TxnKV
+
+TiKV provides both a transactional (TxnKV) API and a non-transactional (RawKV) API.
+
+Learn how to use the [RawKV API](../rawkv/introduction/):
+
+- [Get Put Delete](../rawkv/get-put-delete/)
+- [Scan](../rawkv/scan)
+- [Time to Live (TTL)](../rawkv/ttl)
+- [Compare And Swap (CAS)](../rawkv/cas)
+- [Checksum](../rawkv/checksum)
diff --git a/content/docs/7.1/develop/rawkv/cas.md b/content/docs/7.1/develop/rawkv/cas.md
new file mode 100644
index 00000000..1d6b6129
--- /dev/null
+++ b/content/docs/7.1/develop/rawkv/cas.md
@@ -0,0 +1,79 @@
+---
+title: CAS
+description: How to use RawKV's CAS API
+menu:
+    "7.1":
+        parent: RawKV-7.1
+        weight: 4
+        identifier: CAS-7.1
+---
+
+This document walks you through how to use RawKV’s `CAS (Compare And Swap)` API.
+
+In RawKV, compare-and-swap (CAS) is an atomic operation used to avoid data races in concurrent write requests. It is atomically equivalent to:
+
+```
+prevValue = get(key);
+if (prevValue == request.prevValue) {
+  put(key, request.value);
+}
+return prevValue;
+```
+
+The atomicity guarantees that the new value is calculated based on up-to-date information. If the value has been updated by another thread in the meantime, the write fails.
+
+{{< warning >}}
+Normally, CAS prevents problems caused by concurrent access, but it is subject to the [ABA problem](https://en.wikipedia.org/wiki/ABA_problem).
+{{< /warning >}}
+
+## Java
+
+The following example shows how to use the `CAS` API in Java.
+ +```java +import java.util.Optional; +import org.tikv.common.TiConfiguration; +import org.tikv.common.TiSession; +import org.tikv.raw.RawKVClient; +import org.tikv.shade.com.google.protobuf.ByteString; + +TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379"); +// enable AtomicForCAS when using RawKVClient.compareAndSet or RawKVClient.putIfAbsent +conf.setEnableAtomicForCAS(true); +TiSession session = TiSession.create(conf); +RawKVClient client = session.createRawClient(); + +ByteString key = ByteString.copyFromUtf8("Hello"); +ByteString value = ByteString.copyFromUtf8("CAS"); +ByteString newValue = ByteString.copyFromUtf8("NewValue"); + +// put +client.put(key, value); + +// get +Optional result = client.get(key); +assert(result.isPresent()); +assert("CAS".equals(result.get().toStringUtf8())); +System.out.println(result.get().toStringUtf8()); + +// cas +client.compareAndSet(key, Optional.of(value), newValue); + +// get +result = client.get(key); +assert(result.isPresent()); +assert("NewValue".equals(result.get().toStringUtf8())); +System.out.println(result.get().toStringUtf8()); + +// close +client.close(); +session.close(); +``` + +{{< warning >}} +You must set `conf.setEnableAtomicForCAS(true)` to ensure linearizability of `CAS` when using with `put`, `delete`, `batch_put`, or `batch_delete`. + +To guarantee the atomicity of CAS, write operations like `put` or `delete` in atomic mode are more expensive. +{{< /warning >}} + +The code example used in this chapter can be found [here](https://github.com/marsishandsome/tikv-client-examples/blob/main/java-example/src/main/java/example/rawkv/CAS.java). diff --git a/content/docs/7.1/develop/rawkv/checksum.md b/content/docs/7.1/develop/rawkv/checksum.md new file mode 100644 index 00000000..3ae794a0 --- /dev/null +++ b/content/docs/7.1/develop/rawkv/checksum.md @@ -0,0 +1,74 @@ +--- +title: Checksum +description: Learn how to use RawKV's Checksum API. +menu: + "7.1": + parent: RawKV-7.1 + weight: 5 + identifier: Checksum-7.1 +--- + +This document walks you through how to use RawKV's `Checksum` API. + +`Checksum` API returns `Crc64Xor`, `TotalKvs` and `TotalBytes` from TiKV cluster. +- `Crc64Xor`: The [XOR](https://en.wikipedia.org/wiki/Exclusive_or) of every key-value pair's [crc64](https://en.wikipedia.org/wiki/Cyclic_redundancy_check) value. +- `TotalKVs`: The number of key-value pairs. +- `TotalBytes`: The size of key-value pairs in bytes. + +*Note: If [API V2](../../../concepts/explore-tikv-features/api-v2) is enabled, a `4` bytes prefix is encoded with keys, and also calculated by `Checksum` API*. + +## Go + +### Checksum with range + +Using the `Checksum` API, you can get `{Crc64Xor, TotalKvs, TotalBytes}` of a range from `startKey` (inclusive) to `endKey` (exclusive). + +{{< info >}} +To calculate checksum of all keys, specify `startKey` and `endKey` as `[]byte("")`. 
+ +{{< /info >}} + +```go +package main + +import ( + "context" + "fmt" + + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/tikv/client-go/v2/rawkv" +) + +func main() { + ctx := context.TODO() + cli, err := rawkv.NewClientWithOpts(ctx, []string{"127.0.0.1:2379"}, + rawkv.WithAPIVersion(kvrpcpb.APIVersion_V2)) + if err != nil { + panic(err) + } + defer cli.Close() + + fmt.Printf("Cluster ID: %d\n", cli.ClusterID()) + + // put key into tikv + cli.Put(ctx, []byte("k1"), []byte("v1")) + cli.Put(ctx, []byte("k2"), []byte("v2")) + cli.Put(ctx, []byte("k3"), []byte("v3")) + cli.Put(ctx, []byte("k4"), []byte("v4")) + cli.Put(ctx, []byte("k5"), []byte("v5")) + + checksum, err := cli.Checksum(ctx, []byte("k1"), []byte("k6")) + if err != nil { + panic(err) + } + + fmt.Printf("Get checksum, Crc64Xor:%d, TotalKvs:%d, TotalBytes:%d.\n", + checksum.Crc64Xor, checksum.TotalKvs, checksum.TotalBytes) +} +``` +You will get the result as following: + +```bash +Cluster ID: 7166545317297238572 +Get checksum, Crc64Xor:7402990595130313958, TotalKvs:5, TotalBytes:40. +``` \ No newline at end of file diff --git a/content/docs/7.1/develop/rawkv/get-put-delete.md b/content/docs/7.1/develop/rawkv/get-put-delete.md new file mode 100644 index 00000000..d46e35ee --- /dev/null +++ b/content/docs/7.1/develop/rawkv/get-put-delete.md @@ -0,0 +1,86 @@ +--- +title: Get, Put, and Delete +description: How to use RawKV's basic operations such as Get, Put, and Delete. +menu: + "7.1": + parent: RawKV-7.1 + weight: 1 + identifier: Get, Put, and Delete-7.1 +--- + +This document walks you through how to use RawKV's basic operations such as `Get`, `Put`, and `Delete`. + +## Java + +### Import packages + +First, import all necessary packages as shown in the example. + +```java +import java.util.Optional; +import org.tikv.shade.com.google.protobuf.ByteString; +import org.tikv.common.TiConfiguration; +import org.tikv.common.TiSession; +import org.tikv.raw.RawKVClient; +``` + +In the example above, `com.google.protobuf.ByteString` is used as the type of Key and Value. + +To avoid conflict, `com.google.protobuf.ByteString` is shaded to `org.tikv.shade.com.google.protobuf.ByteString`, and is included in the client package. + +### Create RawKVClient + +To connect to TiKV, a PD address `127.0.0.1:2379` is passed to `TiConfiguration`. + +{{< info >}} +A comma is used to separate multiple PD addresses. For example, `127.0.0.1:2379,127.0.0.2:2379,127.0.0.3:2379`. +{{< /info >}} + +Using the connected `org.tikv.raw.RawKVClient`, you can perform actions such as `Get`, `Put`, and `Delete`. + +```java +TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379"); +TiSession session = TiSession.create(conf); +RawKVClient client = session.createRawClient(); +``` + +### Write data to TiKV + +Using the `put` API, you can write a key-value pair to TiKV. + +```java +ByteString key = ByteString.copyFromUtf8("Hello"); +ByteString value = ByteString.copyFromUtf8("RawKV"); +client.put(key, value); +``` + +### Read data from TiKV + +Using the `get` API, you can get the value of a key from TiKV. If the key does not exist, `result.isPresent()` will be false. + +```java +Optional result = client.get(key); +assert(result.isPresent()); +assert("RawKV".equals(result.get().toStringUtf8())); +``` + +### Delete data from TiKV + +Using the `delete` API, you can delete a key-value pair from TiKV. 
+ +```java +client.delete(key); +result = client.get(key); +assert(!result.isPresent()); +``` + +### Close working instances + +Finally, do not forget to close the `client` and `session` instance. + +```java +client.close(); +session.close(); +``` + +The code example used in this chapter can be found [here](https://github.com/marsishandsome/tikv-client-examples/blob/main/java-example/src/main/java/example/rawkv/PutGetDelete.java). diff --git a/content/docs/7.1/develop/rawkv/introduction.md b/content/docs/7.1/develop/rawkv/introduction.md new file mode 100644 index 00000000..41d24178 --- /dev/null +++ b/content/docs/7.1/develop/rawkv/introduction.md @@ -0,0 +1,19 @@ +--- +title: RawKV +description: How to interact with TiKV using RawKV API. +menu: + "7.1": + parent: Develop-7.1 + weight: 2 + identifier: RawKV-7.1 +--- + +TiKV supports both transactional (TxnKV) API and non-transactional (RawKV) API. This chapter walks you through a few demonstrations about how to use RawKV API. + +- [Get Put Delete](../get-put-delete/) +- [Scan](../scan) +- [Time to Live (TTL)](../ttl) +- [Compare And Swap (CAS)](../cas) +- [Checksum](../checksum) + +To get the example code in this chapter, click [here](https://github.com/marsishandsome/tikv-client-examples). diff --git a/content/docs/7.1/develop/rawkv/scan.md b/content/docs/7.1/develop/rawkv/scan.md new file mode 100644 index 00000000..4c43dbbb --- /dev/null +++ b/content/docs/7.1/develop/rawkv/scan.md @@ -0,0 +1,103 @@ +--- +title: Scan +description: Learn how to use RawKV's Scan API. +menu: + "7.1": + parent: RawKV-7.1 + weight: 2 + identifier: Scan-7.1 +--- + +This document walks you through how to use RawKV's `Scan` API. + +## Java + +### Scan with `limit` + +Using the `Scan` API, you can scan key-value pairs from TiKV in a range (from a `startKey` to an `endKey`). + +{{< info >}} +`startKey` is inclusive while `endKey` is exclusive. + +To configure the `Scan` API to return a limited number of key-value pairs, you can use the `limit` argument as in the following example code: + +{{< /info >}} + +```java +import java.util.List; +import org.tikv.common.TiConfiguration; +import org.tikv.common.TiSession; +import org.tikv.kvproto.Kvrpcpb; +import org.tikv.raw.RawKVClient; +import org.tikv.shade.com.google.protobuf.ByteString; + +TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379"); +TiSession session = TiSession.create(conf); +RawKVClient client = session.createRawClient(); + +// prepare data +client.put(ByteString.copyFromUtf8("k1"), ByteString.copyFromUtf8("v1")); +client.put(ByteString.copyFromUtf8("k2"), ByteString.copyFromUtf8("v2")); +client.put(ByteString.copyFromUtf8("k3"), ByteString.copyFromUtf8("v3")); +client.put(ByteString.copyFromUtf8("k4"), ByteString.copyFromUtf8("v4")); + +// scan with limit +int limit = 1000; +List list = client.scan(ByteString.copyFromUtf8("k1"), ByteString.copyFromUtf8("k5"), limit); +for(Kvrpcpb.KvPair pair : list) { + System.out.println(pair); +} + +// close +client.close(); +session.close(); +``` + +### Scan all data + +The `Scan` API only returns a limited number of key-value pairs. 
If you want to fetch all the data in the range from `startKey` to `endKey`, refer to the following example code for a simple demo: + +```java +import java.util.List; +import org.tikv.common.TiConfiguration; +import org.tikv.common.TiSession; +import org.tikv.common.key.Key; +import org.tikv.kvproto.Kvrpcpb; +import org.tikv.raw.RawKVClient; +import org.tikv.shade.com.google.protobuf.ByteString; + +TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379"); +TiSession session = TiSession.create(conf); +RawKVClient client = session.createRawClient(); + +// prepare data +String keyPrefix = "p"; +for(int i = 1; i <= 9; i ++) { + for(int j = 1; j <= 9; j ++) { + client.put(ByteString.copyFromUtf8(keyPrefix + i + j), ByteString.copyFromUtf8("v" + i + j)); + } +} + +// scan all data +ByteString startKey = ByteString.copyFromUtf8(keyPrefix + "11"); +ByteString endKey = Key.toRawKey(ByteString.copyFromUtf8(keyPrefix + "99")).next().toByteString(); +int limit = 4; +while(true) { + List list = client.scan(startKey, endKey, limit); + Key maxKey = Key.MIN; + for (Kvrpcpb.KvPair pair : list) { + System.out.println(pair); + Key currentKey = Key.toRawKey(pair.getKey()); + if(currentKey.compareTo(maxKey) > 0) { + maxKey = currentKey; + } + } + + if(list.size() < limit) { + break; + } + startKey = maxKey.next().toByteString(); +} +``` + +To get the example code above, click [here](https://github.com/marsishandsome/tikv-client-examples/blob/main/java-example/src/main/java/example/rawkv/Scan.java). diff --git a/content/docs/7.1/develop/rawkv/ttl.md b/content/docs/7.1/develop/rawkv/ttl.md new file mode 100644 index 00000000..85eab729 --- /dev/null +++ b/content/docs/7.1/develop/rawkv/ttl.md @@ -0,0 +1,129 @@ +--- +title: TTL +description: How to use TTL via RawKV API. +menu: + "7.1": + parent: RawKV-7.1 + weight: 3 + identifier: TTL-7.1 +--- + +TiKV provides the Time To Live (TTL) support via the RawKV API. This document provides two examples to show you how to set TTL via the RawKV API. + +## Enable TTL + +Before you set TTL via RawKV API, you must enable TTL in your TiKV cluster. TTL is disabled by default. To enable it, set the following TiKV configuration to `true`. + +```yaml +[storage] +enable-ttl = true +``` + +## Use TTL in Java client + +After TTL is enabled in TiKV, you can set it in Java client via the `put` API or `CAS` API. The following two examples show how to set TTL via the `put` API and `CAS` API. + +### Set TTL in the `put` API + +In the following examples, these operations are performed: + +1. Two key-value pairs, `(k1, v1)` and `(k2, v2)`, are written into TiKV via the `put` API. `(k1, v1)` is written with a TTL of 10 seconds. `(k2, v2)` is written without TTL. +2. Try to read `k1` and `k2` from TiKV. Both values are returned. +3. Let TiKV sleep for 10 seconds, which is the time of TTL. +4. Try to read `k1` and `k2` from TiKV. `v2` is returned, but `v1` is not returned because the TTL has expired. + +```java +import java.util.Optional; +import org.tikv.common.TiConfiguration; +import org.tikv.common.TiSession; +import org.tikv.raw.RawKVClient; +import org.tikv.shade.com.google.protobuf.ByteString; + +TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379"); +TiSession session = TiSession.create(conf); +RawKVClient client = session.createRawClient(); + +// Writes the (k1, v1) into TiKV with a TTL of 10 seconds. +client.put(ByteString.copyFromUtf8("k1"), ByteString.copyFromUtf8("v1"), 10); + +// Writes the (k2, v2) into TiKV without TTL. 
+
+// Writes the (k2, v2) into TiKV without TTL.
+client.put(ByteString.copyFromUtf8("k2"), ByteString.copyFromUtf8("v2"));
+
+// Reads k1 from TiKV. v1 is returned.
+Optional<ByteString> result1 = client.get(ByteString.copyFromUtf8("k1"));
+assert(result1.isPresent());
+assert("v1".equals(result1.get().toStringUtf8()));
+System.out.println(result1.get().toStringUtf8());
+
+// Reads k2 from TiKV. v2 is returned.
+Optional<ByteString> result2 = client.get(ByteString.copyFromUtf8("k2"));
+assert(result2.isPresent());
+assert("v2".equals(result2.get().toStringUtf8()));
+System.out.println(result2.get().toStringUtf8());
+
+// Sleeps for 10 seconds, which is the TTL duration.
+System.out.println("Sleep 10 seconds.");
+Thread.sleep(10000);
+
+// Reads k1 from TiKV. No value is returned because k1's TTL has expired.
+result1 = client.get(ByteString.copyFromUtf8("k1"));
+assert(!result1.isPresent());
+
+// Reads k2 from TiKV. v2 is returned.
+result2 = client.get(ByteString.copyFromUtf8("k2"));
+assert(result2.isPresent());
+assert("v2".equals(result2.get().toStringUtf8()));
+System.out.println(result2.get().toStringUtf8());
+
+// Close
+client.close();
+session.close();
+```
+
+### Set TTL in the `CAS` API
+
+You can also set TTL via the `CAS` API. See the following example:
+
+```java
+import java.util.Optional;
+import org.tikv.common.TiConfiguration;
+import org.tikv.common.TiSession;
+import org.tikv.raw.RawKVClient;
+import org.tikv.shade.com.google.protobuf.ByteString;
+
+TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379");
+// Enables AtomicForCAS when using RawKVClient.compareAndSet or RawKVClient.putIfAbsent
+conf.setEnableAtomicForCAS(true);
+TiSession session = TiSession.create(conf);
+RawKVClient client = session.createRawClient();
+
+ByteString key = ByteString.copyFromUtf8("Hello");
+ByteString value = ByteString.copyFromUtf8("CAS+TTL");
+ByteString newValue = ByteString.copyFromUtf8("NewValue");
+
+// Writes data.
+client.put(key, value);
+
+// CAS with TTL = 10 seconds
+client.compareAndSet(key, Optional.of(value), newValue, 10);
+
+// Reads data. NewValue is returned.
+Optional<ByteString> result = client.get(key);
+assert(result.isPresent());
+assert("NewValue".equals(result.get().toStringUtf8()));
+System.out.println(result.get().toStringUtf8());
+
+// Sleeps for 10 seconds, which is the TTL duration.
+System.out.println("Sleep 10 seconds.");
+Thread.sleep(10000);
+
+// Reads data. No value is returned because the TTL has expired.
+result = client.get(key);
+assert(!result.isPresent());
+
+// Close
+client.close();
+session.close();
+```
+
+The example code above is available [here](https://github.com/marsishandsome/tikv-client-examples/blob/main/java-example/src/main/java/example/rawkv/TTL.java).
diff --git a/content/docs/7.1/new-features/overview.md b/content/docs/7.1/new-features/overview.md
new file mode 100644
index 00000000..a5907c4b
--- /dev/null
+++ b/content/docs/7.1/new-features/overview.md
@@ -0,0 +1,44 @@
+---
+title: What's New
+description: New features and improvements in TiKV since version 6.6
+menu:
+  "7.1":
+    weight: 1
+    identifier: What's New-7.1
+---
+
+This document lists some significant features and improvements of **TiKV 7.1**.
+
+## [TiKV 6.6.0](https://docs.pingcap.com/tidb/v6.6/release-6.6.0)
+
+### Key new features and improvements
+
+#### Scalability
+
+* Support Partitioned Raft KV storage engine (experimental) [#11515](https://github.com/tikv/tikv/issues/11515) [#12842](https://github.com/tikv/tikv/issues/12842) @[busyjay](https://github.com/busyjay) @[tonyxuqqi](https://github.com/tonyxuqqi) @[tabokie](https://github.com/tabokie) @[bufferflies](https://github.com/bufferflies) @[5kbpers](https://github.com/5kbpers) @[SpadeA-Tang](https://github.com/SpadeA-Tang) @[nolouch](https://github.com/nolouch)
+
+    Before TiDB v6.6.0, TiKV's Raft-based storage engine used a single RocksDB instance to store the data of all 'Regions' of the TiKV instance. To support larger clusters more stably, starting from TiDB v6.6.0, a new TiKV storage engine is introduced, which uses multiple RocksDB instances to store TiKV Region data, and the data of each Region is independently stored in a separate RocksDB instance. The new engine can better control the number and level of files in the RocksDB instance, achieve physical isolation of data operations between Regions, and support stably managing more data. You can see it as TiKV managing multiple RocksDB instances through partitioning, which is why the feature is named Partitioned-Raft-KV. The main advantage of this feature is better write performance, faster scaling, and larger volume of data supported with the same hardware. It can also support larger cluster scales.
+
+    Currently, this feature is experimental and not recommended for use in production environments.
+
+    For more information, see [documentation](https://docs.pingcap.com/tidb/v6.6/partitioned-raft-kv).
+
+#### DB operations
+
+* The TiKV-CDC tool is now GA and supports subscribing to data changes of RawKV [#48](https://github.com/tikv/migration/issues/48) @[zeminzhou](https://github.com/zeminzhou) @[haojinming](https://github.com/haojinming) @[pingyu](https://github.com/pingyu)
+
+    TiKV-CDC is a CDC (Change Data Capture) tool for TiKV clusters. TiKV and PD can constitute a KV database when used without TiDB, which is called RawKV. TiKV-CDC supports subscribing to data changes of RawKV and replicating them to a downstream TiKV cluster in real time, thus enabling cross-cluster replication of RawKV.
+
+    For more information, see [documentation](https://tikv.org/docs/latest/concepts/explore-tikv-features/cdc/cdc/).
+
+* Support configuring read-only storage nodes for resource-consuming tasks @[v01dstar](https://github.com/v01dstar)
+
+    In production environments, some read-only operations might consume a large number of resources regularly and affect the performance of the entire cluster, such as backups and large-scale data reading and analysis. TiDB v6.6.0 supports configuring read-only storage nodes for resource-consuming read-only tasks to reduce the impact on the online application. Currently, TiDB, TiSpark, and BR support reading data from read-only storage nodes. You can configure read-only storage nodes according to [steps](https://docs.pingcap.com/tidb/v6.6/readonly-nodes#procedures) and specify where data is read through the system variable `tidb_replica_read`, the TiSpark configuration item `spark.tispark.replica_read`, or the br command line argument `--replica-read-label`, to ensure the stability of cluster performance.
+
+    For more information, see [documentation](https://docs.pingcap.com/tidb/v6.6/readonly-nodes).
+
+* Support dynamically modifying `store-io-pool-size` [#13964](https://github.com/tikv/tikv/issues/13964) @[LykxSassinator](https://github.com/LykxSassinator)
+
+    The TiKV configuration item [`raftstore.store-io-pool-size`](https://docs.pingcap.com/tidb/v6.6/tikv-configuration-file#store-io-pool-size-new-in-v530) specifies the allowable number of threads that process Raft I/O tasks, which can be adjusted when tuning TiKV performance. Before v6.6.0, this configuration item could not be modified dynamically. Starting from v6.6.0, you can modify this configuration item without restarting the server, which allows for more flexible performance tuning.
+
+    For more information, see [documentation](https://docs.pingcap.com/tidb/v6.6/dynamic-config).
diff --git a/content/docs/7.1/reference/CLI/introduction.md b/content/docs/7.1/reference/CLI/introduction.md
new file mode 100644
index 00000000..c8ba74fc
--- /dev/null
+++ b/content/docs/7.1/reference/CLI/introduction.md
@@ -0,0 +1,15 @@
+---
+title: CLI
+description: Command-line interface tools used to administer TiKV clusters
+menu:
+  "7.1":
+    parent: Reference-7.1
+    weight: 4
+    identifier: CLI-7.1
+---
+
+You can use the following command-line interface tools to administer TiKV clusters:
+
+* [`tikv-ctl`](../tikv-ctl): A control plane tool for managing TiKV, both online and offline.
+* [`pd-ctl`](../pd-ctl): A control plane tool for managing PD.
+* [`pd-recover`](../pd-recover): A disaster recovery tool for PD.
diff --git a/content/docs/7.1/reference/CLI/pd-ctl.md b/content/docs/7.1/reference/CLI/pd-ctl.md
new file mode 100644
index 00000000..fd60b24a
--- /dev/null
+++ b/content/docs/7.1/reference/CLI/pd-ctl.md
@@ -0,0 +1,922 @@
+---
+title: pd-ctl
+description: Use PD Control to obtain the state information of a cluster and tune a cluster.
+menu:
+  "7.1":
+    parent: CLI-7.1
+    weight: 2
+    identifier: pd-ctl-7.1
+---
+
+As a command-line tool of PD, pd-ctl (PD Control) obtains the state information of the cluster and tunes the cluster.
+
+## Install PD Control
+
+{{< info >}}
+It is recommended that the version of the Control tool you use be consistent with the version of the cluster.
+{{< /info >}}
+
+### Use TiUP command
+
+To use PD Control, execute the `tiup ctl:v<CLUSTER_VERSION> pd -u http://<pd_ip>:<pd_port> [-i]` command.
+
+### Download installation package
+
+Download the installation package that contains the PD Control binary from the following link.
+
+| Package download link | OS | Architecture | SHA256 checksum |
+|:---------------------------------------------------------------- |:----- |:------------ |:---------------------------------------------------------------- |
+| `https://download.pingcap.org/tidb-{version}-linux-amd64.tar.gz` | Linux | amd64 | `https://download.pingcap.org/tidb-{version}-linux-amd64.sha256` |
+
+{{< info >}}
+`{version}` indicates the version number of TiKV. For example, if `{version}` is `v5.0.0`, the package download link is `https://download.pingcap.org/tidb-v5.0.0-linux-amd64.tar.gz`.
+{{< /info >}}
+
+### Compile from source code
+
+1. Install [Go](https://golang.org/) version 1.13 or later, because Go modules are used.
+2. In the root directory of the [PD project](https://github.com/pingcap/pd), use the `make` or `make pd-ctl` command to compile and generate `bin/pd-ctl`.
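+
+For example, a minimal build-from-source session might look like the following sketch (it assumes a Linux shell with Go and `make` available; the repository URL is the PD project linked above):
+
+```bash
+# Clone the PD repository and enter its root directory.
+git clone https://github.com/pingcap/pd.git
+cd pd
+
+# Compile pd-ctl; the binary is generated at bin/pd-ctl.
+make pd-ctl
+
+# Check that the binary works by printing its version information.
+./bin/pd-ctl --version
+```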
+
+## Usage
+
+Single-command mode:
+
+```bash
+./pd-ctl store -u http://127.0.0.1:2379
+```
+
+Interactive mode:
+
+```bash
+./pd-ctl -i -u http://127.0.0.1:2379
+```
+
+Use environment variables:
+
+```bash
+export PD_ADDR=http://127.0.0.1:2379
+./pd-ctl
+```
+
+Use TLS to encrypt:
+
+```bash
+./pd-ctl -u https://127.0.0.1:2379 --cacert="path/to/ca" --cert="path/to/cert" --key="path/to/key"
+```
+
+## Command line flags
+
+### `--cacert`
+
++ Specifies the path to the certificate file of the trusted CA in PEM format
++ Default: ""
+
+### `--cert`
+
++ Specifies the path to the SSL certificate file in PEM format
++ Default: ""
+
+### `--detach` / `-d`
+
++ Uses the single-command mode (without entering readline)
++ Default: true
+
+### `--help` / `-h`
+
++ Outputs the help information
++ Default: false
+
+### `--interact` / `-i`
+
++ Uses the interactive mode (entering readline)
++ Default: false
+
+### `--key`
+
++ Specifies the path to the SSL certificate key file in PEM format, which is the private key of the certificate specified by `--cert`
++ Default: ""
+
+### `--pd` / `-u`
+
++ Specifies the PD address
++ Default address: `http://127.0.0.1:2379`
++ Environment variable: `PD_ADDR`
+
+### `--version` / `-V`
+
++ Prints the version information and exits
++ Default: false
+
+## Command
+
+### `cluster`
+
+Use this command to view the basic information of the cluster.
+
+Usage:
+
+```bash
+>> cluster                                     // To show the cluster information
+{
+  "id": 6493707687106161130,
+  "max_peer_count": 3
+}
+```
+
+### `config [show | set