Integrating the Sharding-JDBC Middleware with Spring Boot 2.0 for Database and Table Sharding
I. Horizontal Splitting
1. Horizontal database sharding
1) Concept
Based on a chosen field and a defined strategy, the data of one database is split across multiple databases.
2) Result
Every database has the same schema, but holds different data;
the union of all databases is the full data set.
2. Horizontal table sharding
1) Concept
Based on a chosen field and a defined strategy, the data of one table is split across multiple tables.
2) Result
Every table has the same schema, but holds different data;
the union of all tables is the full data set (see the routing sketch below).
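To make the routing idea concrete, here is a minimal, self-contained Java sketch that is not part of the demo project: it picks a physical database and table for a record by hashing a sharding column, in the same modulo style as the algorithms shown later in this article. The hash function and all names here are illustrative assumptions.

import java.util.Objects;

/** Illustrative only: routes a record to a physical database and table by its sharding key. */
public class ShardRouter {

    /** Two sharded databases, named ds_2 and ds_3 as in the demo below. */
    public static String routeDatabase(String shardingValue) {
        int hash = Math.abs(Objects.hashCode(shardingValue));
        return "ds_" + ((hash % 2) + 2);
    }

    /** Five physical tables per database: table_one_1 .. table_one_5. */
    public static String routeTable(String shardingValue) {
        int hash = Math.abs(Objects.hashCode(shardingValue));
        return "table_one_" + ((hash % 5) + 1);
    }

    public static void main(String[] args) {
        String phone = "13800000001";
        // Every physical table shares the same schema; only the routing target differs per record.
        System.out.println(routeDatabase(phone) + "." + routeTable(phone));
    }
}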
II. The Sharding-JDBC Middleware
1. Architecture diagram
2. Features
1) Sharding-JDBC wraps the JDBC API directly, so existing code can be migrated at almost no cost (see the sketch after this list).
2) It works with any Java-based ORM framework, such as Hibernate or MyBatis.
3) It can run on top of any third-party connection pool, such as DBCP, C3P0, BoneCP, or Druid.
4) It is delivered as a plain jar: there is no proxy layer, no extra deployment, and no other dependencies.
5) Sharding strategies are flexible, supporting equality, BETWEEN, and IN conditions as well as multiple sharding keys.
6) SQL parsing is fairly complete, covering aggregation, GROUP BY, ORDER BY, LIMIT, and OR queries.
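Because the sharded data source is exposed as a standard javax.sql.DataSource, the application keeps using the plain JDBC API. A minimal sketch, assuming a logic table table_one with phone and name columns (the columns and method are illustrative, not taken from the demo project):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import javax.sql.DataSource;

public class PlainJdbcExample {

    /**
     * Queries the logic table through plain JDBC; Sharding-JDBC rewrites the SQL
     * and routes it to the right physical database and table behind the DataSource.
     */
    public static String findNameByPhone(DataSource shardingDataSource, String phone) throws Exception {
        String sql = "SELECT name FROM table_one WHERE phone = ?";
        try (Connection conn = shardingDataSource.getConnection();
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, phone);
            try (ResultSet rs = ps.executeQuery()) {
                return rs.next() ? rs.getString("name") : null;
            }
        }
    }
}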
III. Project Demo
1. Project structure
Spring Boot 2.0
Druid 1.1.13
Sharding-JDBC 3.1
2. Database setup
One database serves as the base (unsharded) library: shard_one.
Two databases are used for sharding: shard_two and shard_three.
Logic tables: table_one and table_two.
In the sharding configuration below these map to the data sources ds_0 (shard_one, the default data source), ds_2 (shard_two), and ds_3 (shard_three); each sharded database holds five physical tables per logic table (table_one_1 .. table_one_5 and table_two_1 .. table_two_5).
3. Core code
Data source configuration (YAML)
spring:
  datasource:
    # Data source: shard_one
    dataone:
      type: com.alibaba.druid.pool.DruidDataSource
      druid:
        driverClassName: com.mysql.jdbc.Driver
        url: jdbc:mysql://localhost:3306/shard_one?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=false
        username: root
        password: 123
        initial-size: 10
        max-active: 100
        min-idle: 10
        max-wait: 60000
        pool-prepared-statements: true
        max-pool-prepared-statement-per-connection-size: 20
        time-between-eviction-runs-millis: 60000
        min-evictable-idle-time-millis: 300000
        max-evictable-idle-time-millis: 60000
        validation-query: SELECT 1 FROM DUAL
        # validation-query-timeout: 5000
        test-on-borrow: false
        test-on-return: false
        test-while-idle: true
        connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
    # Data source: shard_two
    datatwo:
      type: com.alibaba.druid.pool.DruidDataSource
      druid:
        driverClassName: com.mysql.jdbc.Driver
        url: jdbc:mysql://localhost:3306/shard_two?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=false
        username: root
        password: 123
        initial-size: 10
        max-active: 100
        min-idle: 10
        max-wait: 60000
        pool-prepared-statements: true
        max-pool-prepared-statement-per-connection-size: 20
        time-between-eviction-runs-millis: 60000
        min-evictable-idle-time-millis: 300000
        max-evictable-idle-time-millis: 60000
        validation-query: SELECT 1 FROM DUAL
        # validation-query-timeout: 5000
        test-on-borrow: false
        test-on-return: false
        test-while-idle: true
        connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
    # Data source: shard_three
    datathree:
      type: com.alibaba.druid.pool.DruidDataSource
      druid:
        driverClassName: com.mysql.jdbc.Driver
        url: jdbc:mysql://localhost:3306/shard_three?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=false
        username: root
        password: 123
        initial-size: 10
        max-active: 100
        min-idle: 10
        max-wait: 60000
        pool-prepared-statements: true
        max-pool-prepared-statement-per-connection-size: 20
        time-between-eviction-runs-millis: 60000
        min-evictable-idle-time-millis: 300000
        max-evictable-idle-time-millis: 60000
        validation-query: SELECT 1 FROM DUAL
        # validation-query-timeout: 5000
        test-on-borrow: false
        test-on-return: false
        test-while-idle: true
        connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
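The article does not show how these three blocks become DruidDataSource beans; that wiring lives in the project source. Purely as a hypothetical sketch of what it could look like, the configuration class below binds each druid prefix to a DruidDataSource through Spring Boot relaxed binding. The bean names match the parameters of the sharding configuration shown later; everything else is an assumption.

import com.alibaba.druid.pool.DruidDataSource;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * Hypothetical sketch of the omitted Druid wiring: each bean is populated
 * from one of the YAML prefixes above via Spring Boot relaxed binding.
 */
@Configuration
public class DruidConfig {

    @Bean
    @ConfigurationProperties(prefix = "spring.datasource.dataone.druid")
    public DruidDataSource dataOneSource() {
        return new DruidDataSource();
    }

    @Bean
    @ConfigurationProperties(prefix = "spring.datasource.datatwo.druid")
    public DruidDataSource dataTwoSource() {
        return new DruidDataSource();
    }

    @Bean
    @ConfigurationProperties(prefix = "spring.datasource.datathree.druid")
    public DruidDataSource dataThreeSource() {
        return new DruidDataSource();
    }
}

The druid-spring-boot-starter offers an equivalent builder-based approach; either way, the goal is simply three independent pool beans that the sharding configuration can combine.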
Database sharding algorithm
/**
 * Database routing: maps a sharding value to one of the sharded data sources.
 */
public class DataSourceAlg implements PreciseShardingAlgorithm<String> {

    private static Logger log = LoggerFactory.getLogger(DataSourceAlg.class);

    @Override
    public String doSharding(Collection<String> names, PreciseShardingValue<String> value) {
        log.debug("Database sharding parameters {},{}", names, value);
        int hash = HashUtil.rsHash(String.valueOf(value.getValue()));
        // hash % 2 is 0 or 1, so the result is always ds_2 or ds_3
        return "ds_" + ((hash % 2) + 2);
    }
}
Table sharding strategy for table_one
/**
 * Table sharding algorithm for table_one.
 */
public class TableOneAlg implements PreciseShardingAlgorithm<String> {

    private static Logger log = LoggerFactory.getLogger(TableOneAlg.class);

    /**
     * The logic table is split into 5 physical tables in each database.
     */
    @Override
    public String doSharding(Collection<String> names, PreciseShardingValue<String> value) {
        log.debug("Table sharding parameters {},{}", names, value);
        int hash = HashUtil.rsHash(String.valueOf(value.getValue()));
        // hash % 5 is 0..4, so the result is table_one_1 .. table_one_5
        return "table_one_" + (hash % 5 + 1);
    }
}
Table sharding strategy for table_two
/**
 * Table sharding algorithm for table_two.
 */
public class TableTwoAlg implements PreciseShardingAlgorithm<String> {

    private static Logger log = LoggerFactory.getLogger(TableTwoAlg.class);

    /**
     * The logic table is split into 5 physical tables in each database.
     */
    @Override
    public String doSharding(Collection<String> names, PreciseShardingValue<String> value) {
        log.debug("Table sharding parameters {},{}", names, value);
        int hash = HashUtil.rsHash(String.valueOf(value.getValue()));
        // hash % 5 is 0..4, so the result is table_two_1 .. table_two_5
        return "table_two_" + (hash % 5 + 1);
    }
}
Sharding data source configuration
/**
 * Database and table sharding configuration.
 */
@Configuration
public class ShardJdbcConfig {

    // The Druid data source beans are omitted here; see the project source.

    /**
     * Sharding-JDBC data source.
     */
    @Bean
    public DataSource dataSource(@Autowired DruidDataSource dataOneSource,
                                 @Autowired DruidDataSource dataTwoSource,
                                 @Autowired DruidDataSource dataThreeSource) throws Exception {
        ShardingRuleConfiguration shardJdbcConfig = new ShardingRuleConfiguration();
        shardJdbcConfig.getTableRuleConfigs().add(getTableRule01());
        shardJdbcConfig.getTableRuleConfigs().add(getTableRule02());
        // Tables without a sharding rule go to the default data source ds_0 (shard_one)
        shardJdbcConfig.setDefaultDataSourceName("ds_0");
        Map<String, DataSource> dataMap = new LinkedHashMap<>();
        dataMap.put("ds_0", dataOneSource);
        dataMap.put("ds_2", dataTwoSource);
        dataMap.put("ds_3", dataThreeSource);
        Properties prop = new Properties();
        return ShardingDataSourceFactory.createDataSource(dataMap, shardJdbcConfig, new HashMap<>(), prop);
    }

    /**
     * Table rule for table_one: sharded by the phone column across ds_2/ds_3 and 5 tables per database.
     */
    private static TableRuleConfiguration getTableRule01() {
        TableRuleConfiguration result = new TableRuleConfiguration();
        result.setLogicTable("table_one");
        result.setActualDataNodes("ds_${2..3}.table_one_${1..5}");
        result.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new DataSourceAlg()));
        result.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new TableOneAlg()));
        return result;
    }

    /**
     * Table rule for table_two: same sharding scheme as table_one.
     */
    private static TableRuleConfiguration getTableRule02() {
        TableRuleConfiguration result = new TableRuleConfiguration();
        result.setLogicTable("table_two");
        result.setActualDataNodes("ds_${2..3}.table_two_${1..5}");
        result.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new DataSourceAlg()));
        result.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new TableTwoAlg()));
        return result;
    }
}
Test endpoints (execution flow)
@RestController
public class ShardController {

    @Resource
    private ShardService shardService;

    /**
     * 1. Create the tables
     */
    @RequestMapping("/createTable")
    public String createTable() {
        shardService.createTable();
        return "success";
    }

    /**
     * 2. Generate data for table_one
     */
    @RequestMapping("/insertOne")
    public String insertOne() {
        shardService.insertOne();
        return "success";
    }

    /**
     * 3. Generate data for table_two
     */
    @RequestMapping("/insertTwo")
    public String insertTwo() {
        shardService.insertTwo();
        return "success";
    }

    /**
     * 4. Query table_one data
     */
    @RequestMapping("/selectOneByPhone/{phone}")
    public TableOne selectOneByPhone(@PathVariable("phone") String phone) {
        return shardService.selectOneByPhone(phone);
    }

    /**
     * 5. Query table_two data
     */
    @RequestMapping("/selectTwoByPhone/{phone}")
    public TableTwo selectTwoByPhone(@PathVariable("phone") String phone) {
        return shardService.selectTwoByPhone(phone);
    }
}
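The ShardService behind these endpoints is not listed in the article; it is part of the project source. As a rough, hypothetical illustration only, the sketch below shows how such a service could write and read through the sharding data source with a JdbcTemplate: all SQL targets the logic table, and Sharding-JDBC routes each statement by the phone value. The column names, data volume, and use of JdbcTemplate are assumptions, not the article's actual implementation.

import java.util.Map;
import javax.sql.DataSource;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Service;

/** Hypothetical sketch of a service talking to the sharding data source. */
@Service
public class ShardServiceSketch {

    private final JdbcTemplate jdbcTemplate;

    /** Injects the sharding DataSource bean defined in the configuration above. */
    public ShardServiceSketch(DataSource dataSource) {
        this.jdbcTemplate = new JdbcTemplate(dataSource);
    }

    /** Inserts rows against the logic table; each row lands in ds_2/ds_3 and one of table_one_1..5. */
    public void insertOne() {
        for (int i = 0; i < 100; i++) {
            String phone = String.format("1380000%04d", i);
            jdbcTemplate.update("INSERT INTO table_one (phone, name) VALUES (?, ?)", phone, "user-" + i);
        }
    }

    /** Queries by the sharding key, so the statement is routed to exactly one physical table. */
    public Map<String, Object> selectOneByPhone(String phone) {
        return jdbcTemplate.queryForMap("SELECT * FROM table_one WHERE phone = ?", phone);
    }
}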
IV. Project Source Code
GitHub: 知了一笑
Summary
The above is an introduction to integrating the Sharding-JDBC middleware with Spring Boot 2.0 to shard data across multiple databases and tables. I hope it is helpful.