About TiDB: A single-machine TiDB deployment example

1. Cluster deployment commands

Following the "TiDB Software and Hardware Recommendations" guide; the steps below are performed on an Ubuntu LTS system.

a. Prepare the Ubuntu environment: partition the disk, disable swap

Reference: official documentation on disk formatting

# Example: a 4 TB NVMe SSD
parted /dev/nvme0n1
(parted) print
(parted) mklabel gpt
(parted) mkpart primary 0KB 4TB
(parted) print

# Format the newly created partition (not the whole disk)
mkfs.ext4 -F /dev/nvme0n1p1
# Check the filesystem type and UUID
blkid

# Mount by UUID (substitute the UUID reported by blkid)
mkdir -p /DATA4T
vi /etc/fstab
UUID="040c7a9c-1dd3-494c-b7b9-298ec2165b83"   /DATA4T   ext4    defaults,nodelalloc,noatime    0    2

mount -a

# Disable swap usage
echo "vm.swappiness = 0" >> /etc/sysctl.conf
swapoff -a && swapon -a
sysctl -p
# reboot

b. System settings: install tools, other settings, passwordless login

ufw disable
apt-get install ntp curl net-tools ifupdown network-scripts openssh-server mysql-client-core-8.0
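
TiDB depends on synchronized clocks across components; after installing ntp, a quick sanity check (ntpq ships with the ntp package):

ntpq -p
timedatectl status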

# Check the NIC name
ifconfig
# Configure a static IP
gedit /etc/network/interfaces
# interfaces(5) file used by ifup(8) and ifdown(8)
auto lo
iface lo inet loopback
 
auto enp4s0
iface enp4s0 inet static
address 192.168.1.11
gateway 192.168.1.1
netmask 255.255.255.0
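
After saving, the interface can be cycled with the ifupdown tools installed above (a reboot also works):

ifdown enp4s0 && ifup enp4s0
ip addr show enp4s0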

# SSH settings
vi /etc/ssh/sshd_config
++
MaxSessions 20

service sshd restart
# Add the tidb user and grant passwordless sudo
adduser tidb
passwd tidb
visudo
++
tidb ALL=(ALL)  NOPASSWD:ALL
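
TiUP distributes SSH keys itself during deploy when given -p, so manual key provisioning is optional; to pre-provision key-based login for the tidb user anyway, a minimal sketch (run on this same host, 192.168.1.11):

su - tidb
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
ssh-copy-id tidb@192.168.1.11
# confirm key login and passwordless sudo both work
ssh tidb@192.168.1.11 "sudo -n true" && echo OK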

c. Deployment configuration:

Reference: official documentation, deploy using TiUP (recommended)

curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh
source ~/.bashrc
tiup cluster template > topology.yaml

Edit topology.yaml for the single-machine configuration:

# # Global variables are applied to all deployments and used as the default value of
# # the deployments if a specific deployment value is missing.
global:
  user: "tidb"
  ssh_port: 22
  deploy_dir: "/DATA4T/tidb-deploy"
  # # TiDB Cluster data storage directory
  data_dir: "/DATA4T/tidb-data"
  arch: "amd64"

monitored:
  node_exporter_port: 9100
  blackbox_exporter_port: 9115

server_configs:
  tidb:
    log.slow-threshold: 300
  tikv:
    readpool.storage.use-unified-pool: false
    readpool.coprocessor.use-unified-pool: true
  pd:
    replication.enable-placement-rules: true
    replication.location-labels:
      - host
  tiflash:
    logger.level: "info"

pd_servers:
  - host: 192.168.1.11

tidb_servers:
  - host: 192.168.1.11
    port: 4000
    status_port: 10080
    deploy_dir: "/DATA4T/tidb-deploy/tidb-4000"
    log_dir: "/DATA4T/tidb-deploy/tidb-4000/log"
  - host: 192.168.1.11
    port: 4001
    status_port: 10081
    deploy_dir: "/DATA4T/tidb-deploy/tidb-4001"
    log_dir: "/DATA4T/tidb-deploy/tidb-4001/log"
  - host: 192.168.1.11
    port: 4002
    status_port: 10082
    deploy_dir: "/DATA4T/tidb-deploy/tidb-4002"
    log_dir: "/DATA4T/tidb-deploy/tidb-4002/log"

tikv_servers:
  - host: 192.168.1.11
    port: 20160
    status_port: 20180
    deploy_dir: "/DATA4T/data1/tidb-deploy/tikv-20160"
    data_dir: "/DATA4T/data1/tidb-data/tikv-20160"
    log_dir: "/DATA4T/data1/tidb-deploy/tikv-20160/log"
    config:
      server.labels: {host: "192.168.1.11"}
  - host: 192.168.1.11
    port: 20161
    status_port: 20181
    deploy_dir: "/DATA4T/data2/tidb-deploy/tikv-20161"
    data_dir: "/DATA4T/data2/tidb-data/tikv-20161"
    log_dir: "/DATA4T/data2/tidb-deploy/tikv-20161/log"
    config:
      server.labels: {host: "192.168.1.11"}
  - host: 192.168.1.11
    # ssh_port: 22
    port: 20162
    status_port: 20182
    deploy_dir: "/DATA4T/data3/tidb-deploy/tikv-20162"
    data_dir: "/DATA4T/data3/tidb-data/tikv-20162"
    log_dir: "/DATA4T/data3/tidb-deploy/tikv-20162/log"
    # numa_node: "0"
    config:
      server.labels: {host: "192.168.1.11"}

tiflash_servers:
  - host: 192.168.1.11
    tcp_port: 9000
    http_port: 8123
    flash_service_port: 3930
    flash_proxy_port: 20170
    flash_proxy_status_port: 20292
    metrics_port: 8234
    deploy_dir: /DATA4T/data1/tidb-deploy/tiflash-9000
    data_dir: /DATA4T/data1/tidb-data/tiflash-9000
    log_dir: /DATA4T/data1/tidb-deploy/tiflash-9000/log
  - host: 192.168.1.11
    tcp_port: 9001
    http_port: 8124
    flash_service_port: 3931
    flash_proxy_port: 20171
    flash_proxy_status_port: 20293
    metrics_port: 8235
    deploy_dir: /DATA4T/data2/tidb-deploy/tiflash-9001
    data_dir: /DATA4T/data2/tidb-data/tiflash-9001
    log_dir: /DATA4T/data2/tidb-deploy/tiflash-9001/log
  - host: 192.168.1.11
    # ssh_port: 22
    tcp_port: 9002
    http_port: 8125
    flash_service_port: 3932
    flash_proxy_port: 20172
    flash_proxy_status_port: 20294
    metrics_port: 8236
    deploy_dir: /DATA4T/data3/tidb-deploy/tiflash-9002
    data_dir: /DATA4T/data3/tidb-data/tiflash-9002
    log_dir: /DATA4T/data3/tidb-deploy/tiflash-9002/log

monitoring_servers:
  - host: 192.168.1.11
  
grafana_servers:
  - host: 192.168.1.11

alertmanager_servers:
  - host: 192.168.1.11
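
Before deploying, TiUP can vet the target machine against this topology; the --apply variant attempts to fix what it can automatically (both are documented subcommands of tiup cluster):

tiup cluster check ./topology.yaml --user tidb -p
tiup cluster check ./topology.yaml --apply --user tidb -p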

Apply the configuration, try starting the cluster, and verify:

tiup cluster deploy tidb-test v6.1.0 ./topology.yaml --user tidb -p
tiup cluster start tidb-test --init

Sample output of `start --init`:

Started cluster `tidb-test` successfully
The root password of TiDB database has been changed.
The new password is: 'V+12S&Bh5*JY91_3EZ'.
Copy and record it to somewhere safe, it is only displayed once, and will not be stored.
The generated password can NOT be get and shown again.

tiup cluster display tidb-test
mysql -u root -h 192.168.1.11 -P 4000 -p

Verify the MySQL connection and the TiDB Dashboard [http://127.0.0.1:2379/dashboard/], logging in as root with the password shown above.
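
A quick smoke test once connected (tidb_version() and information_schema.cluster_info are built into TiDB):

mysql> SELECT tidb_version()\G
mysql> SELECT type, instance, version FROM information_schema.cluster_info;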

2. Data migration

a. Export from MySQL with Dumpling

tiup install dumpling
# Verify the connection to the remote source database
mysql -h192.168.1.10 -P3307 -uroot -p123456

Export the MySQL tables:

tiup dumpling -u root -P 3307 -h 192.168.1.10 -p 123456 -o ./export/table1 -F 8G -T jobdb.tb1 -t 20 --compress gzip
tiup dumpling -u root -P 3307 -h 192.168.1.10 -p 123456 -o ./export/table1 -F 8G -T jobdb.tb2 -t 20 --compress gzip


tiup dumpling -u root -P 3307 -h 192.168.1.10 -p dA1mJ3jA1eC6hA6gU0fV5eA0bM0sI5dR -o ./export/table1 -F 8G -T table1.page -t 20 --compress gzip
tiup dumpling -u root -P 3307 -h 192.168.1.10 -p dA1mJ3jA1eC6hA6gU0fV5eA0bM0sI5dR -o ./export/table1 -F 8G -T table1.page_analysis -t 20 --compress gzip
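
Each run writes schema files, compressed data chunks, and a metadata file into the output directory; a quick sanity check (file names follow Dumpling's default naming scheme):

ls -lh ./export/table1
# expect {db}-schema-create.sql, {db}.{table}-schema.sql,
# data chunks {db}.{table}.{n}.sql.gz, and a metadata file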

b. Create the downstream table schemas

Reference: merging and migrating data from large, sharded MySQL deployments into TiDB

# On the source MySQL instance, capture each table's schema
show create table <table_name>;
# On the TiDB instance (port 4000),
# run the captured CREATE TABLE statements to replicate the databases and tables
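
Alternatively, the schemas can be piped across in one step (a sketch using the jobdb tables above; assumes the jobdb database already exists on the TiDB side):

mysqldump -h 192.168.1.10 -P 3307 -uroot -p --no-data jobdb tb1 tb2 | mysql -h 192.168.1.11 -P 4000 -u root -p jobdb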

c. Import the data with tidb-lightning

Batch-select the *.sql.gz files and extract them into a directory on disk.

tiup tidb-lightning -config tidb-lightning.toml
# wait for the import to finish
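
The tidb-lightning.toml referenced above is not shown in the original; a minimal sketch assuming the local backend, the extracted export directory from step a, and a scratch directory for sorted KV data (paths and password are placeholders):

[lightning]
level = "info"
file = "tidb-lightning.log"

[tikv-importer]
# local backend is fastest for initial imports; target tables must be empty
backend = "local"
sorted-kv-dir = "/DATA4T/sorted-kv-dir"

[mydumper]
# directory holding the extracted Dumpling output
data-source-dir = "/DATA4T/export/table1"

[tidb]
host = "192.168.1.11"
port = 4000
user = "root"
password = "<root-password>"
status-port = 10080
pd-addr = "192.168.1.11:2379"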

Check the results in the database; a count over the large table that had been partitioned at the source:

mysql> select count(*) from page;
+-----------+
| count(*)  |
+-----------+
| 104408407 |
+-----------+
1 row in set (4.51 sec)

Very fast. Perfect.
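Since the topology also deployed TiFlash nodes, a column-store replica can be added to speed up analytical scans like the count above (a sketch; replica count 1 matches a single-machine lab):

ALTER TABLE page SET TIFLASH REPLICA 1;
-- track replication progress until AVAILABLE = 1
SELECT TABLE_NAME, REPLICA_COUNT, AVAILABLE, PROGRESS
FROM information_schema.tiflash_replica;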

d. Create new users

-- inspect existing accounts
select user,host from mysql.user;
select user,host,plugin,authentication_string from mysql.user;
select user,host,plugin from mysql.user;

-- On the Tencent Cloud host (.13)
drop user 'search'@'%';
create user search@'%' identified by 'qq8d#p^azkzqvkyWHQNTd8M6HKb8##V9';
grant all privileges on *.* to search@'%';
flush privileges;

drop user 'server'@'%';
create user server@'%' identified by 'h@VvZIDwGERTxM^pnw1PrdEj!VgIcZ#V';
grant all privileges on complaint_paper.* to server@'%';
flush privileges;

drop user 'test'@'%';
create user test@'%' identified by '123456t';
grant select,insert on *.* to test@'%';
flush privileges;


-- On the TiDB host (.11)
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'Fn0AxdMrqcwLPrwLa9bR1cJ9IF#x0Gzh';

drop user 'search'@'%';
create user search@'%' identified by 'qq8d#p^azfzqvkyWHQNTd8M6HKb8##V9';
update mysql.user set authentication_string = '' where user = 'search';
FLUSH PRIVILEGES;
update mysql.user set host = '%', plugin = 'mysql_native_password' where user = 'search';
ALTER USER 'search'@'%' IDENTIFIED WITH mysql_native_password BY 'qq8d#p^azfzqvkyWHQNTd8M6HKb8##V9';
grant all privileges on *.* to search@'%';
flush privileges;

drop user 'tidb_job'@'%';
create user tidb_job@'%' identified by 'Gy1sDiq8sgzlJeT3WEZ&RZ4BNowy#tY1';
update mysql.user set authentication_string = '' where user = 'tidb_job';
FLUSH PRIVILEGES;
update mysql.user set host = '%', plugin = 'mysql_native_password' where user = 'tidb_job';
ALTER USER 'tidb_job'@'%' IDENTIFIED WITH mysql_native_password BY 'Gy1sDiq8sgzlJeT3WEZ&RZ4BNowy#tY1';
grant all privileges on table1.* to tidb_job@'%';
grant all privileges on platform.* to tidb_job@'%';
flush privileges;

drop user 'test'@'%';
create user test@'%' identified by '123456t';
update mysql.user set authentication_string = '' where user = 'test';
FLUSH PRIVILEGES;
update mysql.user set host = '%', plugin = 'mysql_native_password' where user = 'test';
ALTER USER 'test'@'%' IDENTIFIED WITH mysql_native_password BY '123456t';
grant select,insert on *.* to test@'%';
flush privileges;
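
To confirm an account works end to end, a quick connection test with the test user created above:

mysql -h 192.168.1.11 -P 4000 -u test -p'123456t' -e "SELECT CURRENT_USER(); SHOW GRANTS;"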