Configuration

Seata server configuration under /data/seata/conf: file.conf uses db store mode for the transaction log, and registry.conf registers the TC with Nacos while reading configuration from file.conf:

[root@p0-xxxx-jcsszy-web36 bin]# cat ../conf/file.conf
transport {
  # tcp udt unix-domain-socket
  type = "TCP"
  #NIO NATIVE
  server = "NIO"
  #enable heartbeat
  heartbeat = true
  #thread factory for netty
  thread-factory {
    boss-thread-prefix = "NettyBoss"
    worker-thread-prefix = "NettyServerNIOWorker"
    server-executor-thread-prefix = "NettyServerBizHandler"
    share-boss-worker = false
    client-selector-thread-prefix = "NettyClientSelector"
    client-selector-thread-size = 1
    client-worker-thread-prefix = "NettyClientWorkerThread"
    # netty boss thread size,will not be used for UDT
    boss-thread-size = 1
    #auto default pin or 8
    worker-thread-size = 8
  }
  shutdown {
    # when destroy server, wait seconds
    wait = 3
  }
  serialization = "seata"
  compressor = "none"
}

service {
  #vgroup->rgroup
  vgroup_mapping.my_test_tx_group = "wsm-upms-seata-group"
  #only support single node
  default.grouplist = "127.0.0.1:8091"
  #degrade current not support
  enableDegrade = false
  #disable
  disable = false
  #unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
  max.commit.retry.timeout = "-1"
  max.rollback.retry.timeout = "-1"
}

client {
  async.commit.buffer.limit = 10000
  lock {
    retry.internal = 10
    retry.times = 30
  }
  report.retry.count = 5
  tm.commit.retry.count = 1
  tm.rollback.retry.count = 1
}

## transaction log store
store {
  ## store mode: file、db
  mode = "db"

  ## file store
  file {
    dir = "sessionStore"
    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    max-branch-session-size = 16384
    # globe session size , if exceeded throws exceptions
    max-global-session-size = 512
    # file buffer size , if exceeded allocate new buffer
    file-write-buffer-cache-size = 16384
    # when recover batch read size
    session.reload.read_size = 100
    # async, sync
    flush-disk-mode = async
  }

  ## database store
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp) etc.
    datasource = "druid"
    ## mysql/oracle/h2/oceanbase etc.
    db-type = "mysql"
    driver-class-name = "com.mysql.jdbc.Driver"
    url = "jdbc:mysql://10.132.128.25:3306/seata"
    user = "srv_zzzhfw"
    password = "xxxxxx01"
    min-conn = 1
    max-conn = 3
    global.table = "global_table"
    branch.table = "branch_table"
    lock-table = "lock_table"
    query-limit = 100
  }
}

lock {
  ## the lock store mode: local、remote
  mode = "remote"
  local {
    ## store locks in user's database
  }
  remote {
    ## store locks in the seata's server
  }
}

recovery {
  #schedule committing retry period in milliseconds
  committing-retry-period = 1000
  #schedule asyn committing retry period in milliseconds
  asyn-committing-retry-period = 1000
  #schedule rollbacking retry period in milliseconds
  rollbacking-retry-period = 1000
  #schedule timeout retry period in milliseconds
  timeout-retry-period = 1000
}

transaction {
  undo.data.validation = true
  undo.log.serialization = "jackson"
  undo.log.save.days = 7
  #schedule delete expired undo_log in milliseconds
  undo.log.delete.period = 86400000
  undo.log.table = "undo_log"
}

## metrics settings
metrics {
  enabled = false
  registry-type = "compact"
  # multi exporters use comma divided
  exporter-list = "prometheus"
  exporter-prometheus-port = 9898
}

support {
  ## spring
  spring {
    # auto proxy the DataSource bean
    datasource.autoproxy = false
  }
}

[root@p0-xxxx-jcsszy-web36 bin]# cat ../conf/registry.conf
registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "nacos"

  nacos {
    serverAddr = "api.xxxxhos.xxxxhealthcare.com:80"
    namespace = "067d2017-c1f1-42b8-904e-21e0139f6861"
    cluster = "default"
  }
  eureka {
    serviceUrl = "http://localhost4:8761/eureka"
    application = "seata-server"
    package-max-size = 8388608
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = "0"
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"

  nacos {
    serverAddr = "api.xxxxhos.xxxxhealthcare.com:80"
    namespace = "067d2017-c1f1-42b8-904e-21e0139f6861"
    group = "SEATA_GROUP"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}

[root@p0-xxxx-jcsszy-web36 bin]# pwd
/data/seata/bin
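
Since store.mode is "db", the three tables referenced above (global_table, branch_table, lock_table) must already exist in the seata database before the server starts; their DDL ships with the Seata distribution. A minimal sanity check from this host, assuming the mysql command-line client is installed and the srv_zzzhfw account can connect:

# list the Seata transaction-log tables in the configured database
mysql -h 10.132.128.25 -P 3306 -u srv_zzzhfw -p seata -e "SHOW TABLES LIKE '%table';"
# expected: branch_table, global_table, lock_table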

Startup

startup.sh launches seata-server on port 8091, bound to 10.133.136.207, in db store mode:

[root@p0-xxxx-jcsszy-web36 bin]# pwd
/data/seata/bin
[root@p0-xxxx-jcsszy-web36 bin]# cat startup.sh
#!/bin/bash
nohup sh ./seata-server.sh -p 8091 -h 10.133.136.207 -m db &
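
A few post-start checks, as a sketch: exact commands depend on what is installed on the host, and the Nacos service name is an assumption (recent Seata releases register as seata-server; older ones may use a different name). The namespaceId below is taken from registry.conf.

sh startup.sh                        # start the TC in the background
tail -f nohup.out                    # watch the startup log for errors
ss -tnlp | grep 8091                 # the TC should be listening on port 8091
# confirm the instance registered in Nacos (serviceName "seata-server" assumed)
curl "http://api.xxxxhos.xxxxhealthcare.com:80/nacos/v1/ns/instance/list?serviceName=seata-server&namespaceId=067d2017-c1f1-42b8-904e-21e0139f6861"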