In the configurations below, `DataSourceUtil` stands for a user-supplied helper that creates the underlying `DataSource` instances, and the `ModuloShardingTableAlgorithm` class must be implemented by the user; a minimal sketch follows.
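A minimal sketch of such a table sharding algorithm, assuming the ShardingSphere 4.x `PreciseShardingAlgorithm` SPI (package and method names may differ in other versions):

```java
import java.util.Collection;

import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

// Routes a row to the table whose suffix equals order_id % (number of target tables).
public final class ModuloShardingTableAlgorithm implements PreciseShardingAlgorithm<Long> {
    
    @Override
    public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Long> shardingValue) {
        for (String each : availableTargetNames) {
            if (each.endsWith(String.valueOf(shardingValue.getValue() % availableTargetNames.size()))) {
                return each;
            }
        }
        throw new UnsupportedOperationException("No target matched for value " + shardingValue.getValue());
    }
}
```

The sharding configuration below then wires this algorithm into the default table sharding strategy.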
```java
// Data sharding: route t_order and t_order_item across ds0/ds1 by user_id and order_id.
DataSource getShardingDataSource() throws SQLException {
    ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
    shardingRuleConfig.getTableRuleConfigs().add(getOrderTableRuleConfiguration());
    shardingRuleConfig.getTableRuleConfigs().add(getOrderItemTableRuleConfiguration());
    shardingRuleConfig.getBindingTableGroups().add("t_order, t_order_item");
    shardingRuleConfig.getBroadcastTables().add("t_config");
    shardingRuleConfig.setDefaultDatabaseShardingStrategyConfig(new InlineShardingStrategyConfiguration("user_id", "ds${user_id % 2}"));
    shardingRuleConfig.setDefaultTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("order_id", new ModuloShardingTableAlgorithm()));
    return ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, new Properties());
}

private static KeyGeneratorConfiguration getKeyGeneratorConfiguration() {
    KeyGeneratorConfiguration result = new KeyGeneratorConfiguration("SNOWFLAKE", "order_id");
    return result;
}

TableRuleConfiguration getOrderTableRuleConfiguration() {
    TableRuleConfiguration result = new TableRuleConfiguration("t_order", "ds${0..1}.t_order${0..1}");
    result.setKeyGeneratorConfig(getKeyGeneratorConfiguration());
    return result;
}

TableRuleConfiguration getOrderItemTableRuleConfiguration() {
    TableRuleConfiguration result = new TableRuleConfiguration("t_order_item", "ds${0..1}.t_order_item${0..1}");
    return result;
}

Map<String, DataSource> createDataSourceMap() {
    Map<String, DataSource> result = new HashMap<>();
    result.put("ds0", DataSourceUtil.createDataSource("ds0"));
    result.put("ds1", DataSourceUtil.createDataSource("ds1"));
    return result;
}
```
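The factory returns a standard JDBC `DataSource`, so queries go through it like any other data source. A minimal usage sketch (assuming the methods above live in one class and the physical `t_order` tables already exist in `ds0`/`ds1`):

```java
// Query through the sharded DataSource exactly like a plain JDBC data source.
try (Connection connection = getShardingDataSource().getConnection();
     PreparedStatement statement = connection.prepareStatement("SELECT * FROM t_order WHERE user_id = ? AND order_id = ?")) {
    statement.setLong(1, 10L);
    statement.setLong(2, 1000L);
    try (ResultSet resultSet = statement.executeQuery()) {
        while (resultSet.next()) {
            // Read columns of the routed physical table here.
        }
    }
}
```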
```java
// Read-write splitting: one master and two slaves behind a single logical data source.
DataSource getMasterSlaveDataSource() throws SQLException {
    MasterSlaveRuleConfiguration masterSlaveRuleConfig = new MasterSlaveRuleConfiguration("ds_master_slave", "ds_master", Arrays.asList("ds_slave0", "ds_slave1"));
    return MasterSlaveDataSourceFactory.createDataSource(createDataSourceMap(), masterSlaveRuleConfig, new Properties());
}

Map<String, DataSource> createDataSourceMap() {
    Map<String, DataSource> result = new HashMap<>();
    result.put("ds_master", DataSourceUtil.createDataSource("ds_master"));
    result.put("ds_slave0", DataSourceUtil.createDataSource("ds_slave0"));
    result.put("ds_slave1", DataSourceUtil.createDataSource("ds_slave1"));
    return result;
}
```
```java
// Data encryption: the logical pwd column of t_encrypt is stored as plain_pwd/cipher_pwd using the built-in AES encryptor.
DataSource getEncryptDataSource() throws SQLException {
    return EncryptDataSourceFactory.createDataSource(DataSourceUtil.createDataSource("demo_ds"), getEncryptRuleConfiguration(), new Properties());
}

private static EncryptRuleConfiguration getEncryptRuleConfiguration() {
    Properties props = new Properties();
    props.setProperty("aes.key.value", "123456");
    EncryptorRuleConfiguration encryptorConfig = new EncryptorRuleConfiguration("AES", props);
    EncryptColumnRuleConfiguration columnConfig = new EncryptColumnRuleConfiguration("plain_pwd", "cipher_pwd", "", "aes");
    EncryptTableRuleConfiguration tableConfig = new EncryptTableRuleConfiguration(Collections.singletonMap("pwd", columnConfig));
    EncryptRuleConfiguration encryptRuleConfig = new EncryptRuleConfiguration();
    encryptRuleConfig.getEncryptors().put("aes", encryptorConfig);
    encryptRuleConfig.getTables().put("t_encrypt", tableConfig);
    return encryptRuleConfig;
}
```
```java
// Data sharding + read-write splitting: the logical sharded sources ds_0 and ds_1 are each backed by a master with two slaves.
DataSource getDataSource() throws SQLException {
    ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
    shardingRuleConfig.getTableRuleConfigs().add(getOrderTableRuleConfiguration());
    shardingRuleConfig.getTableRuleConfigs().add(getOrderItemTableRuleConfiguration());
    shardingRuleConfig.getBindingTableGroups().add("t_order, t_order_item");
    shardingRuleConfig.getBroadcastTables().add("t_config");
    shardingRuleConfig.setDefaultDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("user_id", new PreciseModuloShardingDatabaseAlgorithm()));
    shardingRuleConfig.setDefaultTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("order_id", new PreciseModuloShardingTableAlgorithm()));
    shardingRuleConfig.setMasterSlaveRuleConfigs(getMasterSlaveRuleConfigurations());
    return ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, new Properties());
}

private static KeyGeneratorConfiguration getKeyGeneratorConfiguration() {
    KeyGeneratorConfiguration result = new KeyGeneratorConfiguration("SNOWFLAKE", "order_id");
    return result;
}

TableRuleConfiguration getOrderTableRuleConfiguration() {
    TableRuleConfiguration result = new TableRuleConfiguration("t_order", "ds_${0..1}.t_order_${[0, 1]}");
    result.setKeyGeneratorConfig(getKeyGeneratorConfiguration());
    return result;
}

TableRuleConfiguration getOrderItemTableRuleConfiguration() {
    TableRuleConfiguration result = new TableRuleConfiguration("t_order_item", "ds_${0..1}.t_order_item_${[0, 1]}");
    return result;
}

List<MasterSlaveRuleConfiguration> getMasterSlaveRuleConfigurations() {
    MasterSlaveRuleConfiguration masterSlaveRuleConfig1 = new MasterSlaveRuleConfiguration("ds_0", "demo_ds_master_0", Arrays.asList("demo_ds_master_0_slave_0", "demo_ds_master_0_slave_1"));
    MasterSlaveRuleConfiguration masterSlaveRuleConfig2 = new MasterSlaveRuleConfiguration("ds_1", "demo_ds_master_1", Arrays.asList("demo_ds_master_1_slave_0", "demo_ds_master_1_slave_1"));
    return Lists.newArrayList(masterSlaveRuleConfig1, masterSlaveRuleConfig2);
}

Map<String, DataSource> createDataSourceMap() {
    final Map<String, DataSource> result = new HashMap<>();
    result.put("demo_ds_master_0", DataSourceUtil.createDataSource("demo_ds_master_0"));
    result.put("demo_ds_master_0_slave_0", DataSourceUtil.createDataSource("demo_ds_master_0_slave_0"));
    result.put("demo_ds_master_0_slave_1", DataSourceUtil.createDataSource("demo_ds_master_0_slave_1"));
    result.put("demo_ds_master_1", DataSourceUtil.createDataSource("demo_ds_master_1"));
    result.put("demo_ds_master_1_slave_0", DataSourceUtil.createDataSource("demo_ds_master_1_slave_0"));
    result.put("demo_ds_master_1_slave_1", DataSourceUtil.createDataSource("demo_ds_master_1_slave_1"));
    return result;
}
```
```java
// Data sharding + data encryption: shard t_order/t_order_item and encrypt t_order.order_id (AES) and t_order_item.status (MD5).
public DataSource getDataSource() throws SQLException {
    ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
    shardingRuleConfig.getTableRuleConfigs().add(getOrderTableRuleConfiguration());
    shardingRuleConfig.getTableRuleConfigs().add(getOrderItemTableRuleConfiguration());
    shardingRuleConfig.getTableRuleConfigs().add(getOrderEncryptTableRuleConfiguration());
    shardingRuleConfig.getBindingTableGroups().add("t_order, t_order_item");
    shardingRuleConfig.setDefaultDatabaseShardingStrategyConfig(new InlineShardingStrategyConfiguration("user_id", "demo_ds_${user_id % 2}"));
    shardingRuleConfig.setDefaultTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("order_id", new PreciseModuloShardingTableAlgorithm()));
    shardingRuleConfig.setEncryptRuleConfig(getEncryptRuleConfiguration());
    return ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, new Properties());
}

private static TableRuleConfiguration getOrderTableRuleConfiguration() {
    TableRuleConfiguration result = new TableRuleConfiguration("t_order", "demo_ds_${0..1}.t_order_${[0, 1]}");
    result.setKeyGeneratorConfig(getKeyGeneratorConfiguration());
    return result;
}

private static TableRuleConfiguration getOrderItemTableRuleConfiguration() {
    TableRuleConfiguration result = new TableRuleConfiguration("t_order_item", "demo_ds_${0..1}.t_order_item_${[0, 1]}");
    result.setEncryptorConfig(new EncryptorConfiguration("MD5", "status", new Properties()));
    return result;
}

private static EncryptRuleConfiguration getEncryptRuleConfiguration() {
    Properties props = new Properties();
    props.setProperty("aes.key.value", "123456");
    EncryptorRuleConfiguration encryptorConfig = new EncryptorRuleConfiguration("AES", props);
    EncryptColumnRuleConfiguration columnConfig = new EncryptColumnRuleConfiguration("plain_order", "cipher_order", "", "aes");
    EncryptTableRuleConfiguration tableConfig = new EncryptTableRuleConfiguration(Collections.singletonMap("order_id", columnConfig));
    EncryptRuleConfiguration encryptRuleConfig = new EncryptRuleConfiguration();
    encryptRuleConfig.getEncryptors().put("aes", encryptorConfig);
    encryptRuleConfig.getTables().put("t_order", tableConfig);
    return encryptRuleConfig;
}

private static Map<String, DataSource> createDataSourceMap() {
    Map<String, DataSource> result = new HashMap<>();
    result.put("demo_ds_0", DataSourceUtil.createDataSource("demo_ds_0"));
    result.put("demo_ds_1", DataSourceUtil.createDataSource("demo_ds_1"));
    return result;
}

private static KeyGeneratorConfiguration getKeyGeneratorConfiguration() {
    return new KeyGeneratorConfiguration("SNOWFLAKE", "order_id", new Properties());
}
```
```java
// Orchestration: publish the local configuration to ZooKeeper, which acts as both registry center and config center.
DataSource getDataSource() throws SQLException {
    // OrchestrationShardingDataSourceFactory can be replaced with OrchestrationMasterSlaveDataSourceFactory or OrchestrationEncryptDataSourceFactory
    return OrchestrationShardingDataSourceFactory.createDataSource(createDataSourceMap(), createShardingRuleConfig(), new HashMap<String, Object>(), new Properties(),
            new OrchestrationConfiguration(createCenterConfigurationMap()));
}

private Map<String, CenterConfiguration> createCenterConfigurationMap() {
    Map<String, CenterConfiguration> instanceConfigurationMap = new HashMap<String, CenterConfiguration>();
    CenterConfiguration config = createCenterConfiguration();
    instanceConfigurationMap.put("orchestration-sharding-data-source", config);
    return instanceConfigurationMap;
}

private CenterConfiguration createCenterConfiguration() {
    Properties properties = new Properties();
    // "overwrite" is expected to be a String field such as "true"; it controls whether the local configuration overrides the center.
    properties.setProperty("overwrite", overwrite);
    CenterConfiguration result = new CenterConfiguration("zookeeper", properties);
    result.setServerLists("localhost:2181");
    result.setNamespace("sharding-sphere-orchestration");
    result.setOrchestrationType("registry_center,config_center");
    return result;
}
```
ShardingDataSourceFactory is the factory for creating data-sharding data sources.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| dataSourceMap | Map<String, DataSource> | Data source configuration |
| shardingRuleConfig | ShardingRuleConfiguration | Data sharding rule configuration |
| props (?) | Properties | Property configuration |
ShardingRuleConfiguration is the sharding rule configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| tableRuleConfigs | Collection<TableRuleConfiguration> | Table sharding rule list |
| bindingTableGroups (?) | Collection<String> | Binding table rule list |
| broadcastTables (?) | Collection<String> | Broadcast table rule list |
| defaultDataSourceName (?) | String | Tables without a sharding rule are located through the default data source |
| defaultDatabaseShardingStrategyConfig (?) | ShardingStrategyConfiguration | Default database sharding strategy |
| defaultTableShardingStrategyConfig (?) | ShardingStrategyConfiguration | Default table sharding strategy |
| defaultKeyGeneratorConfig (?) | KeyGeneratorConfiguration | Default key generator configuration; defaults to org.apache.shardingsphere.core.keygen.generator.impl.SnowflakeKeyGenerator |
| masterSlaveRuleConfigs (?) | Collection<MasterSlaveRuleConfiguration> | Read-write splitting rules; absent by default, meaning read-write splitting is not used |
TableRuleConfiguration is the table sharding rule configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| logicTable | String | Logic table name |
| actualDataNodes (?) | String | Data nodes made of data source name + table name, separated by a dot; multiple nodes are comma-separated and inline expressions are supported. Defaults to generating the nodes from the known data sources and the logic table name, which is meant for broadcast tables (i.e. every database needs the same table for correlated queries, mostly dictionary tables) or for sharding only databases while the table structure is identical in every database |
| databaseShardingStrategyConfig (?) | ShardingStrategyConfiguration | Database sharding strategy; defaults to the default database sharding strategy |
| tableShardingStrategyConfig (?) | ShardingStrategyConfiguration | Table sharding strategy; defaults to the default table sharding strategy |
| keyGeneratorConfig (?) | KeyGeneratorConfiguration | Key generator configuration; defaults to the default key generator |
| encryptorConfiguration (?) | EncryptorConfiguration | Encryptor configuration |
StandardShardingStrategyConfiguration is an implementation of ShardingStrategyConfiguration for standard sharding scenarios with a single sharding column (a sketch of a range algorithm follows the table).

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| shardingColumn | String | Sharding column name |
| preciseShardingAlgorithm | PreciseShardingAlgorithm | Precise sharding algorithm, used for = and IN |
| rangeShardingAlgorithm (?) | RangeShardingAlgorithm | Range sharding algorithm, used for BETWEEN |
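BETWEEN conditions are handled by the optional range algorithm. A minimal sketch, assuming the 4.x `RangeShardingAlgorithm` SPI and that sharding values map onto table suffixes by modulo (the class name is illustrative):

```java
import java.util.Collection;
import java.util.LinkedHashSet;

import org.apache.shardingsphere.api.sharding.standard.RangeShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.RangeShardingValue;

// Returns every target whose suffix is hit by (value % target count) within the queried range.
public final class RangeModuloShardingTableAlgorithm implements RangeShardingAlgorithm<Long> {
    
    @Override
    public Collection<String> doSharding(final Collection<String> availableTargetNames, final RangeShardingValue<Long> shardingValue) {
        // Sketch only: assumes a bounded range; production code should also handle open-ended ranges.
        Collection<String> result = new LinkedHashSet<>();
        long lower = shardingValue.getValueRange().lowerEndpoint();
        long upper = shardingValue.getValueRange().upperEndpoint();
        for (long value = lower; value <= upper; value++) {
            for (String each : availableTargetNames) {
                if (each.endsWith(String.valueOf(value % availableTargetNames.size()))) {
                    result.add(each);
                }
            }
        }
        return result;
    }
}
```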
ComplexShardingStrategyConfiguration is an implementation of ShardingStrategyConfiguration for composite sharding scenarios with multiple sharding columns (see the sketch after the table).

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| shardingColumns | String | Sharding column names, separated by commas |
| shardingAlgorithm | ComplexKeysShardingAlgorithm | Complex keys sharding algorithm |
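A minimal sketch of a user-defined complex keys algorithm, assuming the 4.x `ComplexKeysShardingAlgorithm` SPI and that both columns appear with = or IN conditions (class name and routing rule are illustrative):

```java
import java.util.ArrayList;
import java.util.Collection;

import org.apache.shardingsphere.api.sharding.complex.ComplexKeysShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.complex.ComplexKeysShardingValue;

// Routes by (user_id + order_id) % target count.
public final class UserOrderComplexShardingAlgorithm implements ComplexKeysShardingAlgorithm<Long> {
    
    @Override
    public Collection<String> doSharding(final Collection<String> availableTargetNames, final ComplexKeysShardingValue<Long> shardingValue) {
        Collection<String> result = new ArrayList<>();
        Collection<Long> userIds = shardingValue.getColumnNameAndShardingValuesMap().get("user_id");
        Collection<Long> orderIds = shardingValue.getColumnNameAndShardingValuesMap().get("order_id");
        for (Long userId : userIds) {
            for (Long orderId : orderIds) {
                long suffix = (userId + orderId) % availableTargetNames.size();
                for (String each : availableTargetNames) {
                    if (each.endsWith(String.valueOf(suffix))) {
                        result.add(each);
                    }
                }
            }
        }
        return result;
    }
}
```

It would be wired in as `new ComplexShardingStrategyConfiguration("user_id, order_id", new UserOrderComplexShardingAlgorithm())`.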
InlineShardingStrategyConfiguration is an implementation of ShardingStrategyConfiguration for configuring inline-expression sharding strategies.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| shardingColumn | String | Sharding column name |
| algorithmExpression | String | Inline expression of the sharding algorithm; must conform to Groovy syntax, see the inline expression documentation for details |
HintShardingStrategyConfiguration is an implementation of ShardingStrategyConfiguration for configuring hint-based sharding strategies (a usage sketch follows the table).

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| shardingAlgorithm | HintShardingAlgorithm | Hint sharding algorithm |
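With the hint strategy the sharding values are not extracted from SQL but pushed in by the application. A minimal usage sketch, assuming the 4.x `HintManager` API:

```java
import org.apache.shardingsphere.api.hint.HintManager;

// Supply database and table sharding values for t_order by hint instead of SQL parsing.
try (HintManager hintManager = HintManager.getInstance()) {
    hintManager.addDatabaseShardingValue("t_order", 1);
    hintManager.addTableShardingValue("t_order", 2);
    // Execute SQL on the sharded DataSource here; the configured HintShardingAlgorithm
    // receives the values added above.
}
```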
NoneShardingStrategyConfiguration is an implementation of ShardingStrategyConfiguration for configuring a non-sharding strategy.

KeyGeneratorConfiguration is the key generator configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| column | String | Generated key column name |
| type | String | Key generator type; user-defined or one of the built-in types: SNOWFLAKE/UUID |
| props | Properties | Properties of the key generator |
Property configuration items; the following key generator properties are available (an example follows the table).

SNOWFLAKE

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| worker.id (?) | long | Unique id of the worker machine, default: 0 |
| max.tolerate.time.difference.milliseconds (?) | long | Maximum tolerated clock-rollback time, in milliseconds, default: 10 ms |
| max.vibration.offset (?) | int | Maximum vibration offset, range [0, 4096), default: 1. Note: if the generated keys are used as sharding values, this property should be configured. Keys generated by this algorithm in different milliseconds always end up as 0 or 1 after modulo 2^n (2^n is usually the database or table count); to avoid this sharding problem, it is recommended to set this property to (2^n)-1 |
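For example, a SNOWFLAKE generator for order_id with explicit properties (the values are illustrative; with 4 shard tables, (2^n)-1 = 3):

```java
// SNOWFLAKE key generator for order_id with explicit properties.
Properties snowflakeProps = new Properties();
snowflakeProps.setProperty("worker.id", "1");
snowflakeProps.setProperty("max.tolerate.time.difference.milliseconds", "10");
snowflakeProps.setProperty("max.vibration.offset", "3"); // (2^n)-1 when the generated key is also the sharding value across 4 tables
KeyGeneratorConfiguration keyGeneratorConfig = new KeyGeneratorConfiguration("SNOWFLAKE", "order_id", snowflakeProps);
```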
EncryptRuleConfiguration is the encryption rule configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| encryptors | Map<String, EncryptorRuleConfiguration> | Encryptor configurations; user-defined or one of the built-in types: MD5/AES |
| tables | Map<String, EncryptTableRuleConfiguration> | Encrypted table configurations |

EncryptorRuleConfiguration is the encryptor configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| type | String | Encryptor type; user-defined or one of the built-in types: MD5/AES |
| properties | Properties | Property configuration. Note: when using the AES encryptor, the AES key property aes.key.value must be configured |

EncryptTableRuleConfiguration is the encrypted table configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| tables | Map<String, EncryptColumnRuleConfiguration> | Encrypted column configurations |

EncryptColumnRuleConfiguration is the encrypted column configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| plainColumn | String | Column that stores the plaintext |
| cipherColumn | String | Column that stores the ciphertext |
| assistedQueryColumn | String | Assisted query column, used for assisted queries with encryptors of the ShardingQueryAssistedEncryptor type |
| encryptor | String | Encryptor name |
Property configuration items; the following properties are available (an example of passing them to the factory follows the table).

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| sql.show (?) | boolean | Whether to show SQL, default: false |
| executor.size (?) | int | Number of worker threads, default: number of CPU cores |
| max.connections.size.per.query (?) | int | Maximum number of connections allocated to each query per physical database, default: 1 |
| check.table.metadata.enabled (?) | boolean | Whether to check the metadata consistency of sharded tables on startup, default: false |
| query.with.cipher.column (?) | boolean | Whether to query with the cipher column when a plain column exists, default: true |
| allow.range.query.with.inline.sharding (?) | boolean | Whether to allow range queries when the inline sharding strategy is used, default: false |
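These properties are passed as the last argument of the factory methods shown earlier, for example:

```java
// Enable SQL logging and raise the per-query connection limit.
Properties props = new Properties();
props.setProperty("sql.show", "true");
props.setProperty("max.connections.size.per.query", "2");
DataSource dataSource = ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, props);
```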
MasterSlaveDataSourceFactory is the factory for creating read-write splitting data sources.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| dataSourceMap | Map<String, DataSource> | Map of data sources and their names |
| masterSlaveRuleConfig | MasterSlaveRuleConfiguration | Read-write splitting rule |
| props (?) | Properties | Property configuration |
MasterSlaveRuleConfiguration is the read-write splitting rule configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| name | String | Name of the read-write splitting data source |
| masterDataSourceName | String | Master data source name |
| slaveDataSourceNames | Collection<String> | List of slave data source names |
| loadBalanceAlgorithm (?) | MasterSlaveLoadBalanceAlgorithm | Load balance algorithm for the slaves |
Property configuration items; the following properties are available.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| sql.show (?) | boolean | Whether to print SQL parsing and rewriting logs, default: false |
| executor.size (?) | int | Number of worker threads for SQL execution; 0 means unlimited. Default: 0 |
| max.connections.size.per.query (?) | int | Maximum number of connections allocated to each query per physical database, default: 1 |
| check.table.metadata.enabled (?) | boolean | Whether to check the metadata consistency of sharded tables on startup, default: false |
EncryptDataSourceFactory is the factory for creating data encryption data sources.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| dataSource | DataSource | Data source; any connection pool |
| encryptRuleConfig | EncryptRuleConfiguration | Data encryption rule |
| props (?) | Properties | Property configuration |

EncryptRuleConfiguration is the data encryption rule configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| encryptors | Map<String, EncryptorRuleConfiguration> | Encryptor configurations; user-defined or one of the built-in types: MD5/AES |
| tables | Map<String, EncryptTableRuleConfiguration> | Encrypted table configurations |

Property configuration items; the following properties are available.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| sql.show (?) | boolean | Whether to show SQL, default: false |
| query.with.cipher.column (?) | boolean | Whether to query with the cipher column when a plain column exists, default: true |
OrchestrationShardingDataSourceFactory is the data source factory for data sharding + orchestration.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| dataSourceMap | Map<String, DataSource> | Same as ShardingDataSourceFactory |
| shardingRuleConfig | ShardingRuleConfiguration | Same as ShardingDataSourceFactory |
| props (?) | Properties | Same as ShardingDataSourceFactory |
| orchestrationConfig | OrchestrationConfiguration | Orchestration rule configuration |

OrchestrationMasterSlaveDataSourceFactory is the data source factory for read-write splitting + orchestration.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| dataSourceMap | Map<String, DataSource> | Same as MasterSlaveDataSourceFactory |
| masterSlaveRuleConfig | MasterSlaveRuleConfiguration | Same as MasterSlaveDataSourceFactory |
| props (?) | Properties | Same as ShardingDataSourceFactory |
| orchestrationConfig | OrchestrationConfiguration | Orchestration rule configuration |

OrchestrationEncryptDataSourceFactory is the data source factory for data encryption + orchestration.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| dataSource | DataSource | Same as EncryptDataSourceFactory |
| encryptRuleConfig | EncryptRuleConfiguration | Same as EncryptDataSourceFactory |
| props (?) | Properties | Same as ShardingDataSourceFactory |
| orchestrationConfig | OrchestrationConfiguration | Orchestration rule configuration |
OrchestrationConfiguration is the orchestration rule configuration object.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| instanceConfigurationMap | Map<String, CenterConfiguration> | Map of config center and registry center configurations; the key is the name and the value is the config center or registry center configuration |

CenterConfiguration is used to configure a config center or registry center.

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| type | String | Instance type of the config center or registry center, e.g. zookeeper, etcd, apollo or nacos |
| properties | Properties | Additional parameters required by this instance, such as ZooKeeper connection parameters; see the properties configuration below |
| orchestrationType | String | Type of the center, e.g. config_center or registry_center; if it serves as both, use setOrchestrationType("registry_center,config_center") |
| serverLists | String | Server list for connecting to the config center or registry center, including IP addresses and port numbers; multiple addresses are separated by commas, e.g. host1:2181,host2:2181 |
| namespace (?) | String | Namespace of the config center or registry center |

The common properties configuration is as follows:

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| overwrite | boolean | Whether the local configuration overrides the config/registry center configuration; if true, every startup uses the local configuration |
If ZooKeeper is used as the config center and/or registry center, the following additional properties can be configured (an example follows the table):

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| digest (?) | String | Permission token for connecting to the registry center; by default no authentication is required |
| operationTimeoutMilliseconds (?) | int | Operation timeout in milliseconds, default: 500 ms |
| maxRetries (?) | int | Maximum number of retries after a connection failure, default: 3 |
| retryIntervalMilliseconds (?) | int | Retry interval in milliseconds, default: 500 ms |
| timeToLiveSeconds (?) | int | Time-to-live of ephemeral nodes in seconds, default: 60 seconds |
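For example, extending the createCenterConfiguration() method shown earlier with ZooKeeper-specific properties (the values are illustrative):

```java
// ZooKeeper center configuration with authentication and retry tuning.
Properties properties = new Properties();
properties.setProperty("overwrite", "true");
properties.setProperty("digest", "user:password"); // omit if no authentication is required
properties.setProperty("operationTimeoutMilliseconds", "1000");
properties.setProperty("maxRetries", "3");
properties.setProperty("retryIntervalMilliseconds", "500");
CenterConfiguration zookeeperConfig = new CenterConfiguration("zookeeper", properties);
zookeeperConfig.setOrchestrationType("registry_center,config_center");
zookeeperConfig.setServerLists("host1:2181,host2:2181");
zookeeperConfig.setNamespace("sharding-sphere-orchestration");
```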
If etcd is used as the config center and/or registry center, the following additional property can be configured:

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| timeToLiveSeconds (?) | long | TTL in seconds, default: 30 seconds |

If Apollo is used as the config center, the following additional properties can be configured:

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| appId (?) | String | Apollo appId, default: "APOLLO_SHARDINGSPHERE" |
| env (?) | String | Apollo env, default: "DEV" |
| clusterName (?) | String | Apollo clusterName, default: "default" |
| administrator (?) | String | Apollo administrator, default: "" |
| token (?) | String | Apollo token, default: "" |
| portalUrl (?) | String | Apollo portalUrl, default: "" |
| connectTimeout (?) | int | Apollo connectTimeout, default: 1000 ms |
| readTimeout (?) | int | Apollo readTimeout, default: 5000 ms |
If Nacos is used as the config center, the following additional properties can be configured (an example follows the table):

| Name | Data Type | Description |
| ---- | --------- | ----------- |
| group (?) | String | Nacos group, default: "SHARDING_SPHERE_DEFAULT_GROUP" |
| timeout (?) | long | Timeout for fetching data from Nacos, in milliseconds, default: 3000 ms |
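For example, a Nacos-backed config center (the server address is illustrative; 8848 is the default Nacos port):

```java
// Nacos config center configuration.
Properties properties = new Properties();
properties.setProperty("overwrite", "true");
properties.setProperty("group", "SHARDING_SPHERE_DEFAULT_GROUP");
properties.setProperty("timeout", "3000");
CenterConfiguration nacosConfig = new CenterConfiguration("nacos", properties);
nacosConfig.setOrchestrationType("config_center");
nacosConfig.setServerLists("localhost:8848");
nacosConfig.setNamespace("sharding-sphere-orchestration");
```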