This thing took me more than two days, and I'm a bit speechless. It should have been a simple setup. Lesson learned: for development, keeping the environment local really does mean far fewer problems.
What is Seata
Seata (GitHub: seata/seata) is an open-source distributed transaction solution that provides high-performance, easy-to-use distributed transactions (AT, TCC, Saga, and XA modes) for microservice architectures.
1. Configure the database
create database seata;
use seata;
-- -------------------------------- The script used when storeMode is 'db' --------------------------------
-- the table to store GlobalSession data
CREATE TABLE IF NOT EXISTS `global_table`
(`xid` VARCHAR(128) NOT NULL,
`transaction_id` BIGINT,
`status` TINYINT NOT NULL,
`application_id` VARCHAR(32),
`transaction_service_group` VARCHAR(32),
`transaction_name` VARCHAR(128),
`timeout` INT,
`begin_time` BIGINT,
`application_data` VARCHAR(2000),
`gmt_create` DATETIME,
`gmt_modified` DATETIME,
PRIMARY KEY (`xid`),
KEY `idx_gmt_modified_status` (`gmt_modified`, `status`),
KEY `idx_transaction_id` (`transaction_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
-- the table to store BranchSession data
CREATE TABLE IF NOT EXISTS `branch_table`
(
`branch_id` BIGINT NOT NULL,
`xid` VARCHAR(128) NOT NULL,
`transaction_id` BIGINT,
`resource_group_id` VARCHAR(32),
`resource_id` VARCHAR(256),
`branch_type` VARCHAR(8),
`status` TINYINT,
`client_id` VARCHAR(64),
`application_data` VARCHAR(2000),
`gmt_create` DATETIME(6),
`gmt_modified` DATETIME(6),
PRIMARY KEY (`branch_id`),
KEY `idx_xid` (`xid`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
-- the table to store lock data
CREATE TABLE IF NOT EXISTS `lock_table`
(`row_key` VARCHAR(128) NOT NULL,
`xid` VARCHAR(96),
`transaction_id` BIGINT,
`branch_id` BIGINT NOT NULL,
`resource_id` VARCHAR(256),
`table_name` VARCHAR(32),
`pk` VARCHAR(36),
`gmt_create` DATETIME,
`gmt_modified` DATETIME,
PRIMARY KEY (`row_key`),
KEY `idx_branch_id` (`branch_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
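These three tables are for the Seata server itself (store.mode=db). In AT mode, each business database additionally needs an undo_log table, which is what client.undo.logTable=undo_log in the configuration below points at. The exact DDL varies slightly between releases, so take it from the script bundled with your Seata version; the following is only a sketch of the 1.x MySQL variant:
-- the table to store undo_log data (goes into every business database that joins AT-mode branches)
CREATE TABLE IF NOT EXISTS `undo_log`
(
`branch_id`     BIGINT       NOT NULL COMMENT 'branch transaction id',
`xid`           VARCHAR(128) NOT NULL COMMENT 'global transaction id',
`context`       VARCHAR(128) NOT NULL COMMENT 'undo_log context, such as serialization',
`rollback_info` LONGBLOB     NOT NULL COMMENT 'rollback info',
`log_status`    INT(11)      NOT NULL COMMENT '0: normal status, 1: defense status',
`log_created`   DATETIME(6)  NOT NULL COMMENT 'create datetime',
`log_modified`  DATETIME(6)  NOT NULL COMMENT 'modify datetime',
UNIQUE KEY `ux_undo_log` (`xid`, `branch_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;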
2. Configure Nacos
The main job here is to upload the relevant configuration to Nacos so that seata-center can read it.
Download nacos-config.sh and config.txt from the seata/seata repository.
Edit config.txt
If your MySQL is 8.0 or newer, change the driver to com.mysql.cj.jdbc.Driver.
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableClientBatchSendRequest=false
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThreadSize=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=default
transport.shutdown.wait=3
service.vgroupMapping.storage-service-group=default
service.vgroupMapping.order-service-group=default
service.default.grouplist=127.0.0.1:8091
service.enableDegrade=false
service.disableGlobalTransaction=false
client.rm.asyncCommitBufferLimit=10000
client.rm.lock.retryInterval=10
client.rm.lock.retryTimes=30
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
client.rm.reportRetryCount=5
client.rm.tableMetaCheckEnable=false
client.rm.sqlParserType=druid
client.rm.reportSuccessEnable=false
client.rm.sagaBranchRegisterEnable=false
client.tm.commitRetryCount=5
client.tm.rollbackRetryCount=5
store.mode=db
store.file.dir=file_store/data
store.file.maxBranchSessionSize=16384
store.file.maxGlobalSessionSize=512
store.file.fileWriteBufferCacheSize=16384
store.file.flushDiskMode=async
store.file.sessionReloadReadSize=100
store.db.datasource=druid
store.db.dbType=mysql
store.db.driverClassName=com.mysql.cj.jdbc.Driver
store.db.url=jdbc:mysql://localhost:3306/seata?useUnicode=true
store.db.user=guest
store.db.password=123456
store.db.minConn=5
store.db.maxConn=30
store.db.globalTable=global_table
store.db.branchTable=branch_table
store.db.queryLimit=100
store.db.lockTable=lock_table
store.db.maxWait=5000
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.maxCommitRetryTimeout=-1
server.maxRollbackRetryTimeout=-1
server.rollbackRetryTimeoutUnlockEnable=false
client.undo.dataValidation=true
client.undo.logSerialization=jackson
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
client.undo.logTable=undo_log
client.log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898
Upload the configuration to Nacos
The thing to watch is that config.txt sits outside (one level above) the conf directory, while nacos-config.sh goes inside the conf folder.
Open nacos-config.sh and you will see a few parameters; any you do not specify fall back to their defaults.
./nacos-config.sh -h localhost -p 8848 -g SEATA_GROUP
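After the script runs, every key in config.txt becomes its own dataId under the group you passed in. To confirm the upload actually landed, you can read one key back through the Nacos config API (the same endpoint the Seata client polls in the troubleshooting log further down); the host, port, and group below simply mirror the command above:
# should print "default" if the upload succeeded
curl "http://localhost:8848/nacos/v1/cs/configs?dataId=service.vgroupMapping.order-service-group&group=SEATA_GROUP"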
3. Configure seata-center
This seata-center is deployed on an Alibaba Cloud ECS instance, which is where most of the problems came from; if both the server and your project run locally, following the official docs is basically enough.
Download the latest binary package from the download center.
Unpack it
unzip xxx -d /opt
Edit conf/registry.conf
registry {
# file、nacos、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
application = "seata-server"
serverAddr = "localhost:8848"
group = "SEATA_GROUP"
namespace = "public"
cluster = "default"
username = "nacos"
password = "nacos"
}
eureka {
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = 0
password = ""cluster ="default"
timeout = 0
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
sessionTimeout = 6000
connectTimeout = 2000
username = ""password =""
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {name = "file.conf"}
}
config {
# file、nacos、apollo、zk、consul、etcd3
type = "nacos"
nacos {
serverAddr = "localhost:8848"
namespace = "public"
group = "SEATA_GROUP"
username = "nacos"
password = "nacos"
}
consul {serverAddr = "127.0.0.1:8500"}
apollo {
appId = "seata-server"
apolloMeta = "http://192.168.1.204:8801"
namespace = "application"
}
zk {
serverAddr = "127.0.0.1:2181"
sessionTimeout = 6000
connectTimeout = 2000
username = ""password =""
}
etcd3 {serverAddr = "http://localhost:2379"}
file {name = "file.conf"}
}
Start it
One pitfall here: you must pass the server's public IP explicitly, otherwise clients pick up the internal (private) IP by default and cannot reach the server.
./bin/seata-server.sh -h <public IP>  (no need to specify it if the server and your project are on the same network)
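On a remote ECS box you will usually also want the server to keep running after the SSH session closes. A minimal sketch; the -p flag sets the listen port (8091, matching the default grouplist above) and the log file name is just my choice:
# start the TC in the background and keep a log file
nohup ./bin/seata-server.sh -h <public IP> -p 8091 > seata-server.log 2>&1 &
# follow the log to confirm it registered with Nacos
tail -f seata-server.log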
4. Configure the project
Dependency
compile('com.alibaba.cloud:spring-cloud-starter-alibaba-seata')
YAML configuration
Only the relevant parts are shown here.
spring:
  cloud:
    nacos:
      discovery:
        server-addr: ${dev.nacos.host}:${dev.port.nacos}
    alibaba:
      seata:
        tx-service-group: default
seata:
  registry:
    type: nacos
    nacos:
      server-addr: ${dev.nacos.host}:${dev.port.nacos}
      namespace:
  config:
    type: nacos
    nacos:
      server-addr: ${dev.nacos.host}:${dev.port.nacos}
      namespace:
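With the dependency and YAML in place, a distributed transaction is started from a method annotated with @GlobalTransactional; the GlobalTransactionalInterceptor visible in the stack trace below is what wraps such methods. A minimal sketch, with the class and method names invented for illustration:
import io.seata.spring.annotation.GlobalTransactional;
import org.springframework.stereotype.Service;

@Service
public class OrderService {

    /**
     * Opens a Seata global transaction. The local order insert and the remote
     * stock deduction each become branch transactions; if anything throws,
     * the TC rolls every branch back.
     */
    @GlobalTransactional(name = "create-order", rollbackFor = Exception.class)
    public void createOrder(Long productId, int count) {
        // 1. call the storage service (e.g. through an OpenFeign client) to deduct stock
        // 2. insert the order row into the local database
        // an exception at any point triggers a global rollback through the TC
    }
}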
5. Common problems
No available service
2021-02-03 16:28:39.015 DEBUG 13168 --- [io-18203-exec-2] s.n.www.protocol.http.HttpURLConnection : sun.net.www.MessageHeader@48a19bfa14 pairs: {GET /nacos/v1/cs/configs?dataId=service.vgroupMapping.default&group=SEATA_GROUP HTTP/1.1: null}{Content-Type: application/json;charset=UTF-8}{Accept-Charset: UTF-8}{Accept-Encoding: gzip}{Client-AppName: unknown}{Client-RequestTS: 1612340919015}{Client-RequestToken: 0982521287eb58bf48e96fb68a6bd6d7}{Client-Version: 1.3.3}{exConfigInfo: true}{RequestId: ce7a75ab-2689-44b2-8b53-ad03589c2eb5}{User-Agent: Java/1.8.0_171}{Host: njpkhuan.top:8848}{Accept: text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2}{Connection: keep-alive}
2021-02-03 16:28:39.035 DEBUG 13168 --- [io-18203-exec-2] s.n.www.protocol.http.HttpURLConnection : sun.net.www.MessageHeader@4c9e16366 pairs: {null: HTTP/1.1 404}{Content-Type: application/json;charset=UTF-8}{Content-Length: 22}{Date: Wed, 03 Feb 2021 08:28:38 GMT}{Keep-Alive: timeout=60}{Connection: keep-alive}
2021-02-03 16:28:39.036 DEBUG 13168 --- [io-18203-exec-2] o.j.s.OpenEntityManagerInViewInterceptor : Closing JPA EntityManager in OpenEntityManagerInViewInterceptor
2021-02-03 16:28:39.037 DEBUG 13168 --- [io-18203-exec-2] o.s.web.servlet.DispatcherServlet : Failed to complete request: io.seata.common.exception.FrameworkException: No available service
2021-02-03 16:28:39.037 DEBUG 13168 --- [io-18203-exec-2] o.s.s.w.h.S.SESSION_LOGGER : No session found by id: Caching result for getSession(false) for this HttpServletRequest.
2021-02-03 16:28:39.037 DEBUG 13168 --- [io-18203-exec-2] o.s.s.w.header.writers.HstsHeaderWriter : Not injecting HSTS header since it did not match the requestMatcher org.springframework.security.web.header.writers.HstsHeaderWriter$SecureRequestMatcher@209091f0
2021-02-03 16:28:39.037 DEBUG 13168 --- [io-18203-exec-2] s.s.w.c.SecurityContextPersistenceFilter : SecurityContextHolder now cleared, as request processing completed
2021-02-03 16:28:39.038 ERROR 13168 --- [io-18203-exec-2] o.a.c.c.C.[.[.[.[dispatcherServlet] : Servlet.service() for servlet [dispatcherServlet] in context with path [/service-gov] threw exception [Request processing failed; nested exception is io.seata.common.exception.FrameworkException: No available service] with root cause
io.seata.common.exception.FrameworkException: No available service
at io.seata.core.rpc.netty.AbstractNettyRemotingClient.loadBalance(AbstractNettyRemotingClient.java:257) ~[seata-all-1.3.0.jar:1.3.0]
at io.seata.core.rpc.netty.AbstractNettyRemotingClient.sendSyncRequest(AbstractNettyRemotingClient.java:133) ~[seata-all-1.3.0.jar:1.3.0]
at io.seata.tm.DefaultTransactionManager.syncCall(DefaultTransactionManager.java:95) ~[seata-all-1.3.0.jar:1.3.0]
at io.seata.tm.DefaultTransactionManager.begin(DefaultTransactionManager.java:53) ~[seata-all-1.3.0.jar:1.3.0]
at io.seata.tm.api.DefaultGlobalTransaction.begin(DefaultGlobalTransaction.java:104) ~[seata-all-1.3.0.jar:1.3.0]
at io.seata.tm.api.TransactionalTemplate.beginTransaction(TransactionalTemplate.java:175) ~[seata-all-1.3.0.jar:1.3.0]
at io.seata.tm.api.TransactionalTemplate.execute(TransactionalTemplate.java:98) ~[seata-all-1.3.0.jar:1.3.0]
at io.seata.spring.annotation.GlobalTransactionalInterceptor.handleGlobalTransaction(GlobalTransactionalInterceptor.java:147) ~[seata-all-1.3.0.jar:1.3.0]
at io.seata.spring.annotation.GlobalTransactionalInterceptor.invoke(GlobalTransactionalInterceptor.java:122) ~[seata-all-1.3.0.jar:1.3.0]
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) ~[spring-aop-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:749) ~[spring-aop-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:691) ~[spring-aop-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at com.fedtech.gov.provider.controller.TestController$$EnhancerBySpringCGLIB$$340d4fe6.saveUser(<generated>) ~[classes/:na]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:1.8.0_171]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:1.8.0_171]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_171]
at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_171]
at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:190) ~[spring-web-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:138) ~[spring-web-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:105) ~[spring-webmvc-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:878) ~[spring-webmvc-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:792) ~[spring-webmvc-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87) ~[spring-webmvc-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040) ~[spring-webmvc-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943) ~[spring-webmvc-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006) ~[spring-webmvc-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at org.springframework.web.servlet.FrameworkServlet.doGet(FrameworkServlet.java:898) ~[spring-webmvc-5.2.12.RELEASE.jar:5.2.12.RELEASE]
at javax.servlet.http.HttpServlet.service(HttpServlet.java:626) ~[tomcat-embed-core-9.0.41.jar:4.0.FR]
at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883) ~[spring-webmvc-5.2.12.RELEASE.jar:5.2.12.RELEASE]
This one is fairly simple: the 404 above means the client cannot resolve its transaction service group to a TC cluster, so you only need to add a matching configuration entry in Nacos, using the dataId and group shown in the request (service.vgroupMapping.default under SEATA_GROUP) with default as the value.
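You can create that entry in the Nacos console, or push it through the config API; a sketch assuming the Nacos address used earlier (if Nacos auth is enabled, fetch an accessToken from /nacos/v1/auth/login first and append it to the request):
# publish the missing mapping: service.vgroupMapping.default -> default
curl -X POST "http://localhost:8848/nacos/v1/cs/configs" \
  -d "dataId=service.vgroupMapping.default" \
  -d "group=SEATA_GROUP" \
  -d "content=default"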