环境
Mysql
我使用的mysql8版本,部署在10.0.3.164服务器上,需要创建一个seata数据库并准备SQL脚本
参考seata/script/server/db at 1.4.0 · seata/seata · GitHub
-- -------------------------------- The script used when storeMode is 'db' --------------------------------
-- the table to store GlobalSession data
CREATE TABLE IF NOT EXISTS `global_table`
(
-- xid: global transaction id, the primary key; also referenced by branch_table.xid
`xid` VARCHAR(128) NOT NULL,
`transaction_id` BIGINT,
`status` TINYINT NOT NULL,
`application_id` VARCHAR(32),
`transaction_service_group` VARCHAR(32),
`transaction_name` VARCHAR(128),
`timeout` INT,
`begin_time` BIGINT,
`application_data` VARCHAR(2000),
`gmt_create` DATETIME,
`gmt_modified` DATETIME,
PRIMARY KEY (`xid`),
-- composite index used by the server when scanning sessions by status/age
KEY `idx_gmt_modified_status` (`gmt_modified`, `status`),
KEY `idx_transaction_id` (`transaction_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
-- the table to store BranchSession data
CREATE TABLE IF NOT EXISTS `branch_table`
(
`branch_id` BIGINT NOT NULL,
-- xid links each branch back to its global transaction (global_table.xid)
`xid` VARCHAR(128) NOT NULL,
`transaction_id` BIGINT,
`resource_group_id` VARCHAR(32),
`resource_id` VARCHAR(256),
`branch_type` VARCHAR(8),
`status` TINYINT,
`client_id` VARCHAR(64),
`application_data` VARCHAR(2000),
-- DATETIME(6): microsecond precision, unlike the plain DATETIME in global_table
`gmt_create` DATETIME(6),
`gmt_modified` DATETIME(6),
PRIMARY KEY (`branch_id`),
KEY `idx_xid` (`xid`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
-- the table to store lock data
CREATE TABLE IF NOT EXISTS `lock_table`
(
`row_key` VARCHAR(128) NOT NULL,
-- widened from VARCHAR(96) to VARCHAR(128): this column stores the same xid
-- value as global_table.xid / branch_table.xid (both VARCHAR(128)); at 96 a
-- full-length xid would be truncated or rejected depending on sql_mode
`xid` VARCHAR(128),
`transaction_id` BIGINT,
`branch_id` BIGINT NOT NULL,
`resource_id` VARCHAR(256),
`table_name` VARCHAR(32),
`pk` VARCHAR(36),
`gmt_create` DATETIME,
`gmt_modified` DATETIME,
PRIMARY KEY (`row_key`),
KEY `idx_branch_id` (`branch_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
Nacos
nacos我部署到了10.0.3.164这台服务器上
中文官网:home
seata版本1.4.2,springcloud中依赖的seata版本要和seata服务一致
从1.4.2版本开始支持从一个Nacos dataId中获取所有配置信息
seata部署到10.0.3.171服务器上,开放8091端口,或者关闭防火墙,springcloud会访问这台服务器,所以要保证能被访问到,否则会出现connect错误
中文官网:Seata
下载seata-server-1.4.2.tar.gz
解压后配置file.conf和registry.conf
file.conf中配置使用db方式保存事务相关信息,mysql版本我使用的是8,要修改一下对应的驱动
store {
mode = "db"
publicKey = ""
db {
datasource = "druid"
dbType = "mysql"
driverClassName = "com.mysql.cj.jdbc.Driver"
url = "jdbc:mysql://10.0.3.164:3306/seata?rewriteBatchedStatements=true"
user = "root"
password = "123456"
minConn = 5
maxConn = 100
globalTable = "global_table"
branchTable = "branch_table"
lockTable = "lock_table"
queryLimit = 100
maxWait = 5000
}
}
registry.conf中配置注册中心和配置中心为nacos
registry {
type = "nacos"
nacos {
application = "seata-server"
serverAddr = "10.0.3.164:8848"
group = "SEATA_GROUP"
namespace = "luck-cloud"
cluster = "default"
username = "nacos"
password = "nacos"
}
}
config {
type = "nacos"
nacos {
serverAddr = "10.0.3.164:8848"
namespace = "luck-cloud"
group = "SEATA_GROUP"
username = "nacos"
password = "nacos"
dataId = "seataServer.properties"
}
}
nacos中新创建一个命名空间luck-cloud、当然也可以使用默认的
在luck-cloud这个命名空间中创建一个Data Id为seataServer.properties的
Group为SEATA_GROUP的配置文件
参考官方的配置,从git上找到配置文件,复制内容到nacos的seataServer.properties中 https://github.com/seata/seata/tree/develop/script/config-center 的config.txt
主要修改db的部分,把数据库配置修改一下,还需要修改
service.vgroupMapping.cloud-luckserver=default service.default.grouplist=10.0.3.171:8091
这两处,其中cloud-luckserver这个根据业务自己写一个就行,不要太长,然后default这两处的配置要一致,这个是集群地址,默认就用default就行
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableClientBatchSendRequest=false
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThreadSize=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=default
transport.shutdown.wait=3
service.vgroupMapping.cloud-luckserver=default
service.default.grouplist=10.0.3.171:8091
service.enableDegrade=false
service.disableGlobalTransaction=false
client.rm.asyncCommitBufferLimit=10000
client.rm.lock.retryInterval=10
client.rm.lock.retryTimes=30
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
client.rm.reportRetryCount=5
client.rm.tableMetaCheckEnable=false
client.rm.tableMetaCheckerInterval=60000
client.rm.sqlParserType=druid
client.rm.reportSuccessEnable=false
client.rm.sagaBranchRegisterEnable=false
client.tm.commitRetryCount=5
client.tm.rollbackRetryCount=5
client.tm.defaultGlobalTransactionTimeout=60000
client.tm.degradeCheck=false
client.tm.degradeCheckAllowTimes=10
client.tm.degradeCheckPeriod=2000
store.mode=db
store.file.dir=file_store/data
store.file.maxBranchSessionSize=16384
store.file.maxGlobalSessionSize=512
store.file.fileWriteBufferCacheSize=16384
store.file.flushDiskMode=async
store.file.sessionReloadReadSize=100
store.db.datasource=druid
store.db.dbType=mysql
store.db.driverClassName=com.mysql.cj.jdbc.Driver
store.db.url=jdbc:mysql://10.0.3.164:3306/seata?useUnicode=true&rewriteBatchedStatements=true
store.db.user=root
store.db.password=123456
store.db.minConn=5
store.db.maxConn=30
store.db.globalTable=global_table
store.db.branchTable=branch_table
store.db.queryLimit=100
store.db.lockTable=lock_table
store.db.maxWait=5000
store.redis.mode=single
store.redis.single.host=127.0.0.1
store.redis.single.port=6379
store.redis.maxConn=10
store.redis.minConn=1
store.redis.maxTotal=100
store.redis.database=0
store.redis.queryLimit=100
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.maxCommitRetryTimeout=-1
server.maxRollbackRetryTimeout=-1
server.rollbackRetryTimeoutUnlockEnable=false
client.undo.dataValidation=true
client.undo.logSerialization=jackson
client.undo.onlyCareUpdateColumns=true
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
client.undo.logTable=undo_log
client.undo.compress.enable=true
client.undo.compress.type=zip
client.undo.compress.threshold=64k
log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898
基础环境准备完了,
启动nacos
./startup.sh
启动seata
nohup ./seata-server.sh -h 10.0.3.171 -p 8091 &
项目配置
maven依赖
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-nacos-config</artifactId>
</dependency>
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
</dependency>
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-seata</artifactId>
<exclusions>
<exclusion>
<groupId>io.seata</groupId>
<artifactId>seata-spring-boot-starter</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.seata</groupId>
<artifactId>seata-spring-boot-starter</artifactId>
<version>1.4.2</version>
</dependency>
<dependency>
<groupId>io.seata</groupId>
<artifactId>seata-all</artifactId>
<version>1.4.2</version>
</dependency>
application.properties配置
spring.application.name=cloud-luckserver
spring.cloud.nacos.discovery.server-addr=10.0.3.164:8848
spring.cloud.nacos.discovery.namespace=luck-cloud
#mysql项目数据源1配置
spring.datasource.druid.first.url=jdbc:mysql://10.0.3.164:3306/business1?allowMultiQueries=true&useUnicode=true&characterEncoding=utf8&useSSL=false
spring.datasource.druid.first.username=root
spring.datasource.druid.first.password=123456
#mysql项目数据源2配置
spring.datasource.druid.second.url=jdbc:mysql://10.0.3.164:3306/business2?allowMultiQueries=true&useUnicode=true&characterEncoding=utf8&useSSL=false
spring.datasource.druid.second.username=root
spring.datasource.druid.second.password=123456
spring.datasource.druid.driver-class-name=com.mysql.cj.jdbc.Driver
spring.datasource.druid.initial-size=1
spring.datasource.druid.min-idle=1
spring.datasource.druid.max-active=20
spring.datasource.druid.max-wait=30000
spring.datasource.druid.time-between-eviction-runs-millis=60000
spring.datasource.druid.min-evictable-idle-time-millis=300000
spring.datasource.druid.validation-query=SELECT'x'
spring.datasource.druid.test-while-idle=true
spring.datasource.druid.test-on-borrow=false
spring.datasource.druid.remove-abandoned=true
spring.datasource.druid.remove-abandoned-timeout=60
spring.datasource.druid.filter=stat
spring.cloud.alibaba.seata.tx-service-group=cloud-luckserver
seata.config.type=nacos
seata.config.nacos.data-id=seataServer.properties
seata.config.nacos.namespace=luck-cloud
seata.config.nacos.server-addr=10.0.3.164:8848
seata.config.nacos.group=SEATA_GROUP
seata.config.nacos.password=nacos
seata.config.nacos.username=nacos
seata.registry.type=nacos
seata.registry.nacos.namespace=luck-cloud
seata.registry.nacos.server-addr=10.0.3.164:8848
seata.registry.nacos.group=SEATA_GROUP
seata.registry.nacos.application=seata-server
seata.registry.nacos.cluster=default
seata.registry.nacos.password=nacos
seata.registry.nacos.username=nacos
seata.enabled=true
seata.service.vgroup-mapping.cloud-luckserver=default
seata.service.grouplist.default=10.0.3.171:8091
seata.service.disable-global-transaction=false
各个业务库需要添加undolog表seata/script/client at 1.4.0 · seata/seata · GitHub
-- for AT mode you must to init this sql for you business database. the seata server not need it.
CREATE TABLE IF NOT EXISTS `undo_log`
(
`branch_id` BIGINT(20) NOT NULL COMMENT 'branch transaction id',
`xid` VARCHAR(100) NOT NULL COMMENT 'global transaction id',
`context` VARCHAR(128) NOT NULL COMMENT 'undo_log context,such as serialization',
`rollback_info` LONGBLOB NOT NULL COMMENT 'rollback info',
`log_status` INT(11) NOT NULL COMMENT '0:normal status,1:defense status',
`log_created` DATETIME(6) NOT NULL COMMENT 'create datetime',
`log_modified` DATETIME(6) NOT NULL COMMENT 'modify datetime',
UNIQUE KEY `ux_undo_log` (`xid`, `branch_id`)
) ENGINE = InnoDB
AUTO_INCREMENT = 1
DEFAULT CHARSET = utf8 COMMENT ='AT transaction mode undo table';
到这里springcloud集成nacos和seata就已经完成了,在需要全局分布式事务的接口或者service上加上@GlobalTransactional即可
接下来是数据源的切换,这个不是必要的操作,参考我之前写的Springboot+Druid+Mybatis+Atomikos动态切换多数据源,分布式事务的实现
动态切换数据源
数据源注解,不同数据源的service方法上加上@DataSource注解,使用AOP切换数据源
package com.luck.datasources.annotation;
import java.lang.annotation.*;
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface DataSource {
String name() default "";
}
动态数据源
package com.luck.datasources;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;
/**
 * Routing data source: Spring asks {@link #determineCurrentLookupKey()} for a
 * key on every connection checkout and resolves it against the configured
 * target data sources.
 */
public class DynamicDataSource extends AbstractRoutingDataSource {

    /** Per-thread routing key; null means "use the default target data source". */
    private static final ThreadLocal<String> KEY_HOLDER = new ThreadLocal<>();

    @Override
    protected Object determineCurrentLookupKey() {
        return getDataSource();
    }

    /** Selects the data source for the current thread. */
    public static void setDataSource(String dataSource) {
        KEY_HOLDER.set(dataSource);
    }

    /** Returns the current thread's selection, or null if none was set. */
    public static String getDataSource() {
        return KEY_HOLDER.get();
    }

    /** Clears the selection so later lookups fall back to the default. */
    public static void clearDataSource() {
        KEY_HOLDER.remove();
    }
}
数据源AOP
package com.luck.datasources.aspect;
import java.lang.reflect.Method;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
import org.aspectj.lang.reflect.MethodSignature;
import org.springframework.core.Ordered;
import org.springframework.stereotype.Component;
import com.luck.datasources.DynamicDataSource;
import com.luck.datasources.annotation.DataSource;
@Aspect
@Component
public class DataSourceAspect implements Ordered {

    /** Matches any method annotated with @DataSource. */
    @Pointcut("@annotation(com.luck.datasources.annotation.DataSource)")
    public void dataSourcePointCut() {
    }

    /**
     * Switches the routing key before the target method runs and restores the
     * previous key afterwards.
     *
     * <p>Fix: the original cleared the ThreadLocal unconditionally in
     * {@code finally}, so when one @DataSource method called another, the
     * outer method silently fell back to the default data source after the
     * inner call returned. We now remember and restore the previous key.
     */
    @Around("dataSourcePointCut()")
    public Object around(ProceedingJoinPoint point) throws Throwable {
        MethodSignature signature = (MethodSignature) point.getSignature();
        Method method = signature.getMethod();
        DataSource ds = method.getAnnotation(DataSource.class);
        // Key active before this invocation (null when not inside another @DataSource call).
        String previous = DynamicDataSource.getDataSource();
        if (ds == null || ds.name().isEmpty()) {
            // No (usable) annotation value: route to the primary data source.
            DynamicDataSource.setDataSource("first");
        } else {
            DynamicDataSource.setDataSource(ds.name());
        }
        try {
            return point.proceed();
        } finally {
            // Restore rather than blindly clear so nested calls compose correctly.
            if (previous == null) {
                DynamicDataSource.clearDataSource();
            } else {
                DynamicDataSource.setDataSource(previous);
            }
        }
    }

    @Override
    public int getOrder() {
        return 1;
    }
}
数据源配置
配置两个数据库数据源,为每个数据源配置一个SQLSessionFactory,这样不同数据源的业务使用自己的session,相互隔离,当发生数据源切换的时候,也不会造成找不到数据库表的问题。
package com.luck.config;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import javax.sql.DataSource;
import org.apache.ibatis.logging.stdout.StdOutImpl;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.io.Resource;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.stereotype.Component;
import com.alibaba.druid.pool.DruidDataSource;
import com.luck.datasources.DynamicDataSource;
/**
 * Declares the two Druid data sources, the routing {@link DynamicDataSource},
 * and one SqlSessionFactory per physical data source so each business module's
 * sessions stay isolated when the routing key switches.
 *
 * <p>Cleanups: the redundant {@code @Component} was removed ({@code @Configuration}
 * is itself a stereotype annotation), and the duplicated pool / session-factory
 * construction code was extracted into private helpers.
 */
@Configuration
public class DataSourceConfig {
    @Value("${spring.datasource.druid.driver-class-name}")
    private String driverClass;
    @Value("${spring.datasource.druid.first.url}")
    private String jdbcUrl;
    @Value("${spring.datasource.druid.first.username}")
    private String username;
    @Value("${spring.datasource.druid.first.password}")
    private String password;
    @Value("${spring.datasource.druid.initial-size}")
    private int initialSize;
    @Value("${spring.datasource.druid.min-idle}")
    private int minIdle;
    @Value("${spring.datasource.druid.max-active}")
    private int maxActive;
    @Value("${spring.datasource.druid.max-wait}")
    private long maxWaitMillis;
    @Value("${spring.datasource.druid.time-between-eviction-runs-millis}")
    private long timeBetweenEvictionRunsMillis;
    @Value("${spring.datasource.druid.min-evictable-idle-time-millis}")
    private long minEvictableIdleTimeMillis;
    @Value("${spring.datasource.druid.validation-query}")
    private String validationQuery;
    @Value("${spring.datasource.druid.test-while-idle}")
    private boolean testWhileIdle;
    @Value("${spring.datasource.druid.test-on-borrow}")
    private boolean testOnBorrow;
    @Value("${spring.datasource.druid.remove-abandoned}")
    private boolean removeAbandoned;
    @Value("${spring.datasource.druid.remove-abandoned-timeout}")
    private int removeAbandonedTimeout;
    @Value("${spring.datasource.druid.filter}")
    private String filters;
    @Value("${spring.datasource.druid.second.url}")
    private String secondurl;
    @Value("${spring.datasource.druid.second.username}")
    private String secondusername;
    @Value("${spring.datasource.druid.second.password}")
    private String secondpassword;

    /**
     * Data source 1 ("first") — the primary/default data source.
     */
    @Primary
    @Bean(name = "first")
    public DataSource first() throws SQLException {
        return buildDruidDataSource(jdbcUrl, username, password);
    }

    /**
     * Data source 2 ("second").
     */
    @Bean(name = "second")
    public DataSource second() throws SQLException {
        return buildDruidDataSource(secondurl, secondusername, secondpassword);
    }

    /**
     * Routing data source that dispatches to "first" or "second" based on the
     * key held by {@link DynamicDataSource}; "first" is the default target.
     *
     * @param first  data source 1
     * @param second data source 2
     * @return the routing data source
     */
    @Bean(name = "dataSource")
    public DynamicDataSource dataSource(@Qualifier("first") DataSource first, @Qualifier("second") DataSource second) {
        Map<Object, Object> targetDataSource = new HashMap<>(16);
        targetDataSource.put("first", first);
        targetDataSource.put("second", second);
        DynamicDataSource dataSource = new DynamicDataSource();
        dataSource.setTargetDataSources(targetDataSource);
        dataSource.setDefaultTargetDataSource(first);
        return dataSource;
    }

    /**
     * SqlSessionFactory bound to data source 1.
     *
     * @param dataSource data source 1
     */
    @Primary
    @Bean(name = "sqlSessionFactory")
    public SqlSessionFactory sqlSessionFactory(@Qualifier("first") DataSource dataSource) throws Exception {
        return buildSqlSessionFactory(dataSource);
    }

    /**
     * SqlSessionFactory bound to data source 2.
     *
     * @param dataSource data source 2
     */
    @Bean(name = "secondSqlSessionFactory")
    public SqlSessionFactory secondSqlSessionFactory(@Qualifier("second") DataSource dataSource) throws Exception {
        return buildSqlSessionFactory(dataSource);
    }

    /**
     * Builds a Druid pool with the shared pool settings and the given
     * connection parameters (the only values that differ between the two
     * data sources).
     */
    private DruidDataSource buildDruidDataSource(String url, String user, String pwd) throws SQLException {
        DruidDataSource ds = new DruidDataSource();
        ds.setDriverClassName(driverClass);
        ds.setUrl(url);
        ds.setUsername(user);
        ds.setPassword(pwd);
        ds.setInitialSize(initialSize);
        ds.setMinIdle(minIdle);
        ds.setMaxActive(maxActive);
        ds.setMaxWait(maxWaitMillis);
        ds.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis);
        ds.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
        ds.setValidationQuery(validationQuery);
        ds.setTestWhileIdle(testWhileIdle);
        ds.setTestOnBorrow(testOnBorrow);
        ds.setRemoveAbandoned(removeAbandoned);
        ds.setRemoveAbandonedTimeout(removeAbandonedTimeout);
        // setFilters parses the filter list and may throw SQLException
        ds.setFilters(filters);
        return ds;
    }

    /**
     * Builds a SqlSessionFactory with the shared mapper locations, type-alias
     * package and MyBatis settings; both factories differ only in data source.
     */
    private SqlSessionFactory buildSqlSessionFactory(DataSource dataSource) throws Exception {
        SqlSessionFactoryBean sessionFactory = new SqlSessionFactoryBean();
        sessionFactory.setDataSource(dataSource);
        PathMatchingResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
        Resource[] mybatis = resolver.getResources("classpath*:mybatis/**/mapper/*.xml");
        Resource[] app = resolver.getResources("classpath*:app/**/mapper/*.xml");
        Resource[] mls = new Resource[mybatis.length + app.length];
        System.arraycopy(mybatis, 0, mls, 0, mybatis.length);
        System.arraycopy(app, 0, mls, mybatis.length, app.length);
        sessionFactory.setMapperLocations(mls);
        sessionFactory.setTypeAliasesPackage("com.luck.**.domain");
        // Fully-qualified to avoid clashing with Spring's @Configuration import
        org.apache.ibatis.session.Configuration configuration = new org.apache.ibatis.session.Configuration();
        configuration.setMapUnderscoreToCamelCase(true);
        configuration.setLogImpl(StdOutImpl.class);
        sessionFactory.setConfiguration(configuration);
        return sessionFactory.getObject();
    }
}
Mybatis配置
项目中使用的是tk.mybatis,其他的mybatis框架配置差不太多,这里面主要做的工作是为不同数据源的业务提供不同的扫描,项目中不同的数据源业务要使用不同包名做区分,否则在使用事务时,如果不同的业务在同一个包中会被认为是同一个session,导致出现找不到表的错误
package com.luck.config;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import tk.mybatis.mapper.autoconfigure.MapperAutoConfiguration;
import tk.mybatis.spring.mapper.MapperScannerConfigurer;
/**
 * Registers one tk.mybatis mapper scanner per data source, so each business
 * package binds to the SqlSessionFactory of its own database.
 */
@Configuration
@AutoConfigureAfter(MapperAutoConfiguration.class)
public class MapperScannerConfig {
/** Scans business1 mappers and binds them to data source 1's session factory. */
@Bean
public MapperScannerConfigurer mapperScannerConfigurer() {
MapperScannerConfigurer mapperScannerConfigurer = new MapperScannerConfigurer();
mapperScannerConfigurer.setSqlSessionFactoryBeanName("sqlSessionFactory");
// NOTE(review): pattern "com.luck.business1**.mapper" lacks the dot that the
// second scanner's "com.luck.business2.**.mapper" has — likely a typo; confirm
// against the actual package layout before changing it.
mapperScannerConfigurer.setBasePackage("com.luck.business1**.mapper");
mapperScannerConfigurer.setMarkerInterface(com.luck.base.mapper.BaseMapper.class);
return mapperScannerConfigurer;
}
/** Scans business2 mappers and binds them to data source 2's session factory. */
@Bean
public MapperScannerConfigurer secondMapperScannerConfigurer() {
MapperScannerConfigurer mapperScannerConfigurer = new MapperScannerConfigurer();
mapperScannerConfigurer.setSqlSessionFactoryBeanName("secondSqlSessionFactory");
mapperScannerConfigurer.setBasePackage("com.luck.business2.**.mapper");
mapperScannerConfigurer.setMarkerInterface(com.luck.base.mapper.BaseMapper.class);
return mapperScannerConfigurer;
}
}
启动类配置
去掉默认的数据源配置
package com.luck;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
/**
 * Application entry point. DataSourceAutoConfiguration is excluded because the
 * data sources are declared manually in DataSourceConfig.
 */
@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
public class MainApplication {
public static void main(String[] args) {
SpringApplication.run(MainApplication.class, args);
}
}
使用事务
// Routed to the "first" data source by the @DataSource aspect.
@DataSource(name = "first")
@Override
public void test1() {
// mapper1.update()
}
// Routed to the "second" data source by the @DataSource aspect.
@DataSource(name = "second")
@Override
public void test2() {
// mapper2.insert()
}
// Opens a Seata global transaction spanning both data sources; the deliberate
// exception below should roll back the writes made by test1()/test2().
@GlobalTransactional
@Override
public void test3() {
test2();
test1();
int i = 1/0; // deliberate exception to trigger the global rollback
}
其他的一些使用
// When an exception occurs, the global transaction can be rolled back manually
// (only if this thread is bound to a global transaction, i.e. XID is present).
if (!StringUtils.isBlank(RootContext.getXID())) {
try {
GlobalTransactionContext.reload(RootContext.getXID()).rollback();
} catch (Exception e) {
// NOTE(review): the rollback failure is swallowed here — at minimum log it.
}
}
|