
Redis's 8 Data Types and Transactions, with Usage Examples


String, List, Hash, Set, Zset, geospatial, hyperloglog (HyperLogLog), bitmaps
Below are my actual test sessions:

The 5 basic data types

1 String

127.0.0.1:6379> exists hello // check whether a key exists
(integer) 1
127.0.0.1:6379> set hello world // set hello-world (hello is the key, world the value)
OK
127.0.0.1:6379> get hello // get the value of hello
"world"
127.0.0.1:6379> type hello // type of the value
string
127.0.0.1:6379[2]> append hello ! // string concatenation
(integer) 6
127.0.0.1:6379[2]> get hello
"world!"
127.0.0.1:6379> set haha heihei
OK
127.0.0.1:6379> keys h* // return all keys matching the pattern
1) "hello"
2) "haha"
127.0.0.1:6379> randomkey // return a random key
"haha"
127.0.0.1:6379> randomkey
"hello"
127.0.0.1:6379> rename hello hehe // rename a key
OK
127.0.0.1:6379> get haha
"heihei"
127.0.0.1:6379> expire haha 10 // set the key's time to live in seconds (haha expires after 10s)
(integer) 1
127.0.0.1:6379> ttl haha // get the remaining time to live of haha
(integer) 7
127.0.0.1:6379> get haha
(nil) // once the expire time is up, haha-heihei is deleted
127.0.0.1:6379[2]> del hello // hello was renamed to hehe above, so nothing is deleted
(integer) 0
    
==================================    
#increment and decrement (i++)
hadoop102:6379> set views 0
OK
hadoop102:6379> get views
"0"
hadoop102:6379> incr views // increment by 1
(integer) 1
hadoop102:6379> decr views // decrement by 1
(integer) 0
#set a step size (i += n)
hadoop102:6379> incrby views 10
(integer) 10
hadoop102:6379> decrby views 10
(integer) 0

====================================
#string operations
127.0.0.1:6379[2]> substr hello 1 2 // get a substring
"or"
hadoop102:6379> set key1 "hello,chenxu"
OK
hadoop102:6379> getrange key1 0 3 // get a substring by range
"hell"
hadoop102:6379> substr key1 0 -1 // get the whole string
"hello,chenxu"
hadoop102:6379> setrange key2 2 xx // overwrite starting at offset 2 (key2 held "hello" beforehand)
(integer) 5
hadoop102:6379> get key2
"hexxo"
hadoop102:6379> setrange key2 2 llll // overwrite starting at offset 2
(integer) 6
hadoop102:6379> get key2
"hellll"

==========================================
# setex (set with expire): set a value together with an expiry time
# setnx (set if not exists): only set if the key does not exist (commonly used for distributed locks)
hadoop102:6379> setex key3 10 "hello" // expires after 10 seconds
OK
hadoop102:6379> ttl key3
(integer) 7
hadoop102:6379> get key3 // value still exists
"hello"
hadoop102:6379> get key3 // value has expired
(nil)
hadoop102:6379> setnx mykey "redis" // key does not exist, so the set succeeds
(integer) 1
hadoop102:6379> get mykey
"redis"
hadoop102:6379> setnx mykey "hello" // key already exists, so the set fails
(integer) 0
hadoop102:6379> get mykey // value unchanged
"redis"
    
===============================================
#mset // set multiple values at once; the companion msetnx also exists (note: msetnx is atomic, so if any one key fails, they all fail)
#mget // get multiple values at once
    
hadoop102:6379> mset key1 10 key2 20 key3 30
OK
hadoop102:6379> mget key1 key2 key3
1) "10"
2) "20"
3) "30"
 
hadoop102:6379> msetnx key1 5 key4 10 // key1 already exists, so key1 cannot be set and key4's set fails along with it
(integer) 0
hadoop102:6379> get key4
(nil)
hadoop102:6379> get key1
"10"
    
================================================
#storing objects
hadoop102:6379> set user1 {name:chenxu,age:24}
OK
hadoop102:6379> get user1
"{name:chenxu,age:24}"
hadoop102:6379> mset user:1:name chenxu user:1:age 24 // objects can also be stored this way, though it is not the best approach
OK
hadoop102:6379> mget user:1:name user:1:age // the pattern is key:{id}:{field} value
1) "chenxu"
2) "24"

================================================
hadoop102:6379> getset db redis // returns the old value; none exists yet, so nil is returned and the new value set
(nil)
hadoop102:6379> get db
"redis"
hadoop102:6379> getset db mongodb // a value already exists, so it is returned and then replaced
"redis"
hadoop102:6379> get db
"mongodb"

// applications: counters; counting quantities across multiple units

// additional commands:
    dbsize // return the number of keys in the current database
    select // switch databases
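
A minimal sketch of the counter use case, assuming a local Redis and the redis-py client (the key name views:home is made up):

import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=True)

def record_view(page: str) -> int:
    # INCR is atomic, so concurrent clients never lose an update.
    return r.incr(f"views:{page}")

print(record_view("home"))  # 1 on the first call, then 2, 3, ...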

2 List

// The common list operations include LPUSH, RPUSH and LRANGE. LPUSH inserts a new element on the left of the list, RPUSH inserts one on the right, and LRANGE extracts a range of elements from the list. A few examples:

// the underlying structure is a linked list
// create a list called mylist and insert the element "1" at its head
127.0.0.1:6379> lpush mylist "1"
// the reply is the current number of elements in mylist
(integer) 1
// insert the element "2" on the right of mylist
127.0.0.1:6379> rpush mylist "2"
(integer) 2
// insert the element "0" on the left of mylist
127.0.0.1:6379> lpush mylist "0"
(integer) 3
// list the elements of mylist from index 0 to index 1
127.0.0.1:6379> lrange mylist 0 1
1) "0"
2) "1"
// list the elements of mylist from index 0 to the last element
127.0.0.1:6379> lrange mylist 0 -1
1) "0"
2) "1"
3) "2"

================================================    
// removing elements
lpop // remove the leftmost element
rpop // remove the rightmost element
hadoop102:6379> lrange list 0 -1
1) "4"
2) "3"
3) "2"
4) "1"
hadoop102:6379> lpop list // remove the leftmost element
"4"
hadoop102:6379> rpop list // remove the rightmost element
"1"
hadoop102:6379> lrange list 0 -1
1) "3"
2) "2"

================================================
lindex
hadoop102:6379> lrange list 0 -1
1) "4"
2) "3"
3) "2"
4) "1"
hadoop102:6379> lindex list 1 // get a value from the list by its index
"3"
hadoop102:6379> lindex list 0
"4"
    
================================================
llen
hadoop102:6379> llen list // get the length of the list
(integer) 4
    
================================================
// removing specific values
lrem
hadoop102:6379> lrem list 1 3 // remove one occurrence of the value 3
(integer) 1
hadoop102:6379> lrange list 0 -1
1) "5"
2) "4"
3) "2"
4) "1"
hadoop102:6379> lrem list 1 5 // remove one occurrence of the value 5
(integer) 1
hadoop102:6379> lrange list 0 -1
1) "4"
2) "2"
3) "1"

hadoop102:6379> lrange list 0 -1
1) "5"
2) "5"
3) "4"
4) "2"
5) "1"
hadoop102:6379> lrem list 3 5 // if the count exceeds the number of occurrences, every occurrence of the value is removed
(integer) 2
hadoop102:6379> lrange list 0 -1
1) "4"
2) "2"
3) "1"

================================================
ltrim // trim the list, keeping only part of it
hadoop102:6379> lrange list 0 -1
1) "hello3"
2) "hello2"
3) "hello1"
4) "hello"
hadoop102:6379> ltrim list 1 2 // keep only the selected slice of values
OK
hadoop102:6379> lrange list 0 -1 
1) "hello2"
2) "hello1" 
    
================================================
combined command rpoplpush // remove the last element of a list and push it onto another list
hadoop102:6379> lrange list 0 -1
1) "4"
2) "3"
3) "2"
4) "1"
hadoop102:6379> rpoplpush list list1 // the reply is the value of the last element
"1"
hadoop102:6379> lrange list 0 -1 // the last value has been removed
1) "4"
2) "3"
3) "2"
hadoop102:6379> lrange list1 0 -1 // the removed value has been moved into list1
1) "1"

================================================
lset // update a value
hadoop102:6379> lrange list 0 -1
1) "4"
2) "3"
3) "2"
hadoop102:6379> lset list 0 5 // set the element at index 0 to 5; errors if the list does not exist or the index is out of range
OK
hadoop102:6379> lrange list 0 -1
1) "5"
2) "3"
3) "2"
    
================================================
linsert
hadoop102:6379> lrange list 0 -1
1) "2"
2) "1"
hadoop102:6379> linsert list before 1 3 // insert 3 before the value 1
(integer) 3
hadoop102:6379> linsert list after 2 5 // insert 5 after the value 2
(integer) 4
hadoop102:6379> lrange list 0 -1
1) "2"
2) "5"
3) "3"
4) "1"
    
// Summary: a list is really a linked list; values can be inserted from both the left and the right.
// If the key does not exist, a new list is created.
// If the key exists, new elements are appended.
// Removing all values leaves an empty list, meaning the key no longer exists.
// Inserting or updating at either end is the most efficient; touching middle elements is comparatively slower.
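
Because both ends are cheap to touch, a list works as either a queue or a stack; a minimal redis-py sketch (the key name jobs is made up):

import redis

r = redis.Redis(decode_responses=True)

r.delete("jobs")
r.lpush("jobs", "job1", "job2", "job3")  # list is now: job3, job2, job1

# LPUSH + RPOP consumes oldest-first (a FIFO queue);
# LPUSH + LPOP consumes newest-first (a LIFO stack).
print(r.rpop("jobs"))  # "job1"
print(r.lpop("jobs"))  # "job3"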

3 Set

================================================ 
// adding values
// add a new element hello to the set myset
hadoop102:6379> sadd myset hello
(integer) 1
hadoop102:6379> sadd myset chenxu
(integer) 1
hadoop102:6379> sadd myset nb
(integer) 1

hadoop102:6379> smembers myset // list the set's elements
1) "hello"
2) "nb"
3) "chenxu"
hadoop102:6379> scard myset // get the number of elements in the set
(integer) 3
hadoop102:6379> sismember myset nb // check membership; returns 1 if present
(integer) 1
hadoop102:6379> sismember myset nb1 // returns 0 if absent
(integer) 0
hadoop102:6379> srem myset nb // remove the given element
(integer) 1
hadoop102:6379> smembers myset 
1) "hello"
2) "chenxu"

hadoop102:6379> smembers myset
1) "one"
2) "two"
hadoop102:6379> smembers yourset
1) "one"
2) "2"
3) "1"
hadoop102:6379> sdiff myset yourset // difference
1) "two"
hadoop102:6379> sinter myset yourset // intersection
1) "one"
hadoop102:6379> sunion myset yourset // union
1) "1"
2) "2"
3) "one"
4) "two"    
    
================================================
// a set is an unordered collection of unique elements
hadoop102:6379> smembers myset
1) "hello"
2) "nb"
3) "chenxu"
hadoop102:6379> srandmember myset // pick a random element
"hello"
hadoop102:6379> srandmember myset
"chenxu"
hadoop102:6379> srandmember myset 2 // pick two random elements
1) "chenxu"
2) "nb"
hadoop102:6379> srandmember myset 2
1) "hello"
2) "nb"    
    
================================================
// spop: randomly remove and return elements
hadoop102:6379> smembers myset
1) "hello"
2) "nb"
3) "chenxu"
hadoop102:6379> spop myset // the removal is random
"hello"
hadoop102:6379> spop myset 
"nb"
hadoop102:6379> smembers myset
1) "chenxu"
 
================================================
// move a specific value into another set
hadoop102:6379> smembers myset
1) "hello"
2) "np"
hadoop102:6379> smembers myset1
1) "1"
hadoop102:6379> smove myset myset1 hello // move hello from myset into myset1
(integer) 1
hadoop102:6379> smembers myset
1) "np"
hadoop102:6379> smembers myset1
1) "hello"
2) "1" 
        

4 Hash

// create a hash and assign values
127.0.0.1:6379> hmset user:001 username antirez password P1pp0 age 34 // set multiple field-value pairs
OK
// list the hash's contents
127.0.0.1:6379> hgetall user:001 // replies 1)/2), 3)/4) and 5)/6) each form one field-value pair
1) "username"
2) "antirez"
3) "password"
4) "P1pp0"
5) "age"
6) "34"
// change one value in the hash
127.0.0.1:6379> hset user:001 password 12345 // set a single field
(integer) 0
// list the hash's contents again
127.0.0.1:6379> hgetall user:001 
1) "username"
2) "antirez"
3) "password"
4) "12345"
5) "age"
6) "34"
hadoop102:6379> hget user:001 username // get the value of field username in key user:001
"antirez"
hadoop102:6379> hmget user:001 username password // get the values of several fields in key user:001
1) "antirez"
2) "12345"
    
hadoop102:6379> hdel user:001 age // delete one or more fields (returns the number actually deleted, 0 if none)
(integer) 1
hadoop102:6379> hgetall user:001
1) "username"
2) "antirez"
3) "password"
4) "12345"
hadoop102:6379> hlen user:001 // the length is 2, i.e. two field-value pairs remain
(integer) 2

hadoop102:6379> hexists user:001 username // returns 1 if the field exists
(integer) 1
hadoop102:6379> hexists user:001 age // returns 0 if it does not
(integer) 0

hadoop102:6379> hkeys user:001 // get all field names
1) "password"
2) "username"
hadoop102:6379> hvals user:001 // get all field values
1) "12345"
2) "antirez"

================================================
hadoop102:6379> hset myhash field 1
(integer) 1
hadoop102:6379> hincrby myhash field 5 // increment by a step
(integer) 6
hadoop102:6379> hincrby myhash field 5
(integer) 11

hadoop102:6379> hsetnx myhash field1 2 // the field does not exist, so the insert succeeds
(integer) 1
hadoop102:6379> hgetall myhash
1) "field"
2) "11"
3) "field1"
4) "2"
hadoop102:6379> hsetnx myhash field 3 // the field already exists, so the insert fails
(integer) 0
hadoop102:6379> hgetall myhash
1) "field"
2) "11"
3) "field1"
4) "2"

// hash applications: storing user info, especially frequently changing fields
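
A minimal redis-py sketch of that user-info use case, reusing the key and fields from the transcript above:

import redis

r = redis.Redis(decode_responses=True)

# One hash per user: single fields can be read or updated without
# rewriting the whole object, which suits frequently changing data.
r.hset("user:001", mapping={"username": "antirez", "age": 34})
r.hincrby("user:001", "age", 1)  # bump one field in place
print(r.hgetall("user:001"))     # {'username': 'antirez', 'age': '35'}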

5 Zset (sorted set)

================================================
// the sorted set, zset
hadoop102:6379> zadd myzset 1 chenxu // add an element with its score to the zset
(integer) 1
hadoop102:6379> zadd myzset 3 majie
(integer) 1
hadoop102:6379> zadd myzset 2 liuyanlin
(integer) 1
hadoop102:6379> zrange myzset 0 -1 // list the zset's elements
1) "chenxu"
2) "liuyanlin"
3) "majie"
hadoop102:6379> zrange myzset 0 -1 withscores // list elements together with their scores
1) "chenxu"
2) "1"
3) "liuyanlin"
4) "2"
5) "majie"
6) "3"

================================================
// sorting with a sorted set
hadoop102:6379> zadd salary 1000 zhangsan
(integer) 1
hadoop102:6379> zadd salary 2000 lisi 
(integer) 1
hadoop102:6379> zadd salary 1500 wangwu
(integer) 1
hadoop102:6379> zrangebyscore salary -inf +inf withscores // ascending order
1) "zhangsan"
2) "1000"
3) "wangwu"
4) "1500"
5) "lisi"
6) "2000"
hadoop102:6379> zrevrange salary 0 -1 withscores // descending order
1) "lisi"
2) "2000"
3) "wangwu"
4) "1500"
5) "zhangsan"
6) "1000"
    
hadoop102:6379> zrangebyscore salary (5 (2000 // "(5 (2000" means the open interval 5 < score < 2000; ascending order
1) "zhangsan"
2) "wangwu"
hadoop102:6379> zrevrangebyscore salary (2000 (500 withscores // the open interval 500 < score < 2000; descending order
1) "wangwu"
2) "1500"
3) "zhangsan"
4) "1000"

    
================================================
//移除元素
hadoop102:6379> zrange salary 0 -1
1) "zhangsan"
2) "wangwu"
3) "lisi"
hadoop102:6379> zrem salary zhangsan
(integer) 1
hadoop102:6379> zrange salary 0 -1
1) "wangwu"
2) "lisi"
    
================================================
// count the members within a score range
hadoop102:6379> zcount salary 500 1500
(integer) 2
hadoop102:6379> zcount salary 1000 1500
(integer) 2
hadoop102:6379> zcount salary 1000 2000 // as this shows, both bounds are inclusive by default
(integer) 3
 
// applications: leaderboards; ranking and sorting
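
A minimal leaderboard sketch in redis-py (the key name leaderboard is made up; the data mirrors the salary example):

import redis

r = redis.Redis(decode_responses=True)

# member -> score; ZADD keeps the set ordered by score at all times.
r.zadd("leaderboard", {"zhangsan": 1000, "lisi": 2000, "wangwu": 1500})

# Top 2 members, highest score first (ZREVRANGE ... WITHSCORES).
print(r.zrevrange("leaderboard", 0, 1, withscores=True))
# [('lisi', 2000.0), ('wangwu', 1500.0)]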

The 3 special data types

6 geospatial

// geospatial data and radius queries
// add geographic positions (longitude, latitude, name)
// Adding a position (longitude, latitude, name) to a key stores the data in a sorted set, so that the GEORADIUS and GEORADIUSBYMEMBER commands can later query positions by radius.
// There are 6 commands in total:
GEOADD: add one or more geographic items to a key
Syntax: GEOADD key longitude latitude member [longitude latitude member ...]

GEODIST: return the distance between two members of a key
Syntax: GEODIST key member1 member2 [unit]  unit can be m, km, ft or mi; the default is m

GEOHASH: return the Geohash representation of one or more members; Geohash is a latitude/longitude hashing scheme
Syntax: GEOHASH key member [member ...]

GEOPOS: return the longitude/latitude of one or more members; because of the geohash encoding, the returned coordinates may differ slightly from the values that were added
Syntax: GEOPOS key member [member ...]

GEORADIUS: return all positions within a given radius of a given center point
Syntax: GEORADIUS key longitude latitude radius m|km|ft|mi [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count]

GEORADIUSBYMEMBER: like GEORADIUS, except the center is an already-added member rather than an explicit longitude/latitude
Syntax: GEORADIUSBYMEMBER key member radius m|km|ft|mi [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count]

================================================
// in practice the data set is usually bulk-imported once, e.g. with a Java program
// GEOADD
// GEOADD key longitude latitude member [longitude latitude member ...]
hadoop102:6379> geoadd China:city 112.99514856933591 23.21346451288593 guangzhou // add key, coordinates and name
(integer) 1
hadoop102:6379> geoadd China:city 116.23128 40.22077 beijing
(integer) 1
hadoop102:6379> geoadd China:city 121.48941 31.40527 shanghai
(integer) 1
hadoop102:6379> geoadd China:city 117.30983 39.71755 tianjin
(integer) 1
hadoop102:6379> geoadd China:city 113.88308 22.55329 shenzhen
(integer) 1
hadoop102:6379> geoadd China:city 120.21201 30.2084 hangzhou
(integer) 1
hadoop102:6379> geoadd China:city 108.93425 34.23053 xian
(integer) 1

================================================    
//GEOPOS
//GEOPOS key member [member ...]    
hadoop102:6379> geopos China:city guangzhou shanghai // return the coordinates of guangzhou and shanghai
1) 1) "112.99514919519424438"
   2) "23.21346431104928598"
2) 1) "121.48941010236740112"
   2) "31.40526993848380499"
    
================================================    
//GEODIST
//GEODIST key member1 member2 [unit]    
hadoop102:6379> geodist China:city guangzhou shanghai km // straight-line distance between guangzhou and shanghai (default unit m; km chosen here)
"1237.9327"

================================================    
// querying by radius
//GEORADIUS;
//GEORADIUS key longitude latitude radius m|km|ft|mi 
//GEORADIUS key longitude latitude radius m|km|ft|mi [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count]    
hadoop102:6379> georadius China:city 110 30 1100 km // cities within 1100 km of the center point (110, 30)
1) "xian"
2) "shenzhen"
3) "guangzhou"
4) "hangzhou"
    
hadoop102:6379> georadius China:city 110 30 1100 km withdist // return each city along with its distance from the center
1) 1) "xian"
   2) "481.1278"
2) 1) "shenzhen"
   2) "914.1294"
3) 1) "guangzhou"
   2) "811.3693"
4) 1) "hangzhou"
   2) "982.5796"

hadoop102:6379> georadius China:city 110 30 1100 km withcoord // return each city along with its coordinates
1) 1) "xian"
   2) 1) "108.93425256013870239"
      2) "34.23053097599082406"
2) 1) "shenzhen"
   2) 1) "113.88307839632034302"
      2) "22.55329111565713873"
3) 1) "guangzhou"
   2) 1) "112.99514919519424438"
      2) "23.21346431104928598"
4) 1) "hangzhou"
   2) 1) "120.21200805902481079"
      2) "30.20839995425554747"
    
hadoop102:6379> georadius China:city 110 30 1100 km count 2 // return at most 2 in-range cities, ordered by distance from the center
1) "xian"
2) "guangzhou"
    
hadoop102:6379> georadius China:city 110 30 1100 km count 2 ASC // nearest first (the default order)
1) "xian"
2) "guangzhou"
    
hadoop102:6379> georadius China:city 110 30 1100 km count 2 DESC // farthest first
1) "hangzhou"
2) "shenzhen"

hadoop102:6379> georadius China:city 110 30 1100 km withhash // return each city along with its raw geohash score
1) 1) "xian"
   2) (integer) 4040115270369361
2) 1) "shenzhen"
   2) (integer) 4046340107163728
3) 1) "guangzhou"
   2) (integer) 4046509575538088
4) 1) "hangzhou"

================================================ 
//GEORADIUSBYMEMBER 
// like GEORADIUS, except the center is an existing member rather than an explicit longitude/latitude
//GEORADIUSBYMEMBER key member radius m|km|ft|mi [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count]
hadoop102:6379> georadiusbymember China:city guangzhou 500 km count 3 DESC // same usage; if count exceeds the number of matches, all matches are returned
1) "shenzhen"
2) "guangzhou"
    
================================================    
//GEOHASH
//GEOHASH key member [member ...]
hadoop102:6379> geohash China:city guangzhou shanghai // a hashing scheme that maps 2-D coordinates to 1-D strings; the more two strings have in common, the closer the points are
1) "ws0k53bs1z0"
2) "wtw6st1uuq0"

================================================ 
// GEO is implemented on top of Zset, so Zset commands work on GEO keys
hadoop102:6379> zrange China:city 0 -1
1) "xian"
2) "shenzhen"
3) "guangzhou"
4) "hangzhou"
5) "shanghai"
6) "tianjin"
7) "beijing"

hadoop102:6379> zrem China:city beijing // remove the member beijing
(integer) 1
hadoop102:6379> zrange China:city 0 -1
1) "xian"
2) "shenzhen"
3) "guangzhou"
4) "hangzhou"
5) "shanghai"
6) "tianjin"
      
================================================     
// applications:
// finding people nearby (see the sketch below)
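
A "people nearby" sketch in redis-py. Note that geoadd's argument style varies across redis-py versions; this assumes redis-py 4.x, where coordinates are passed as one flat sequence:

import redis

r = redis.Redis(decode_responses=True)

# Flat sequence of longitude, latitude, member (redis-py 4.x style).
r.geoadd("China:city", (113.88308, 22.55329, "shenzhen",
                        112.99515, 23.21346, "guangzhou"))

# Everything within 1100 km of the point (110, 30), with distances.
print(r.georadius("China:city", 110, 30, 1100, unit="km", withdist=True))
# e.g. [['shenzhen', 914.1294], ['guangzhou', 811.3693]]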

7 hyperloglog (HyperLogLog)

// cardinality: the number of distinct elements
// HyperLogLog is an algorithm, not something unique to Redis
// counting users the traditional way: store user ids in a set and count the set's elements
// but a set then has to hold a huge number of user ids
// HyperLogLog does not store the ids themselves, and its memory use is bounded: counting up to 2^64 distinct elements takes only 12 KB
// Redis also optimizes HyperLogLog storage: while the count is small it uses a sparse representation with a tiny footprint, and only switches to the dense representation (the full 12 KB) once the sparse form grows past a threshold

================================================   
hadoop102:6379> pfadd mykey a b c d e f g h i j // add elements
(integer) 1
hadoop102:6379> pfcount mykey // estimated cardinality of the inserted elements
(integer) 10
hadoop102:6379> pfadd mykey1 s i h b g f k a g b c
(integer) 1
hadoop102:6379> pfcount mykey1
(integer) 9
hadoop102:6379> pfmerge mykey mykey1 // merge mykey1 into mykey (mykey changes, mykey1 does not; if mykey did not exist it would start out empty)
OK
hadoop102:6379> pfcount mykey1
(integer) 9
hadoop102:6379> pfcount mykey // k and s are new values, so the cardinality grows by 2
(integer) 12

// on accuracy: the cardinality estimate is approximate, with a standard error of 0.81%, which is acceptable for most uses
// use HyperLogLog when some error is tolerable; when it is not, use a set or another data type
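
A unique-visitor (UV) counting sketch in redis-py; the key name uv:2022-07-05 is made up:

import redis

r = redis.Redis(decode_responses=True)

# PFADD records visitors; duplicates do not grow the count.
for user_id in ["u1", "u2", "u3", "u1", "u2"]:
    r.pfadd("uv:2022-07-05", user_id)

# PFCOUNT estimates the cardinality (~0.81% standard error, <=12 KB per key).
print(r.pfcount("uv:2022-07-05"))  # 3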

8 bitmaps

// using a bitmap to record a week of sign-ins
hadoop102:6379> setbit sign 0 1 // set a bit
(integer) 0
hadoop102:6379> setbit sign 1 1 
(integer) 0
hadoop102:6379> setbit sign 2 1
(integer) 0
hadoop102:6379> setbit sign 3 0 
(integer) 0
hadoop102:6379> setbit sign 4 0
(integer) 0
hadoop102:6379> setbit sign 5 1
(integer) 0
hadoop102:6379> setbit sign 6 1
(integer) 0
hadoop102:6379> getbit sign 3 // query a bit
(integer) 0
hadoop102:6379> getbit sign 5
(integer) 1
hadoop102:6379> bitcount sign // count the set bits
(integer) 5
    
// applications: tracking user activity and logins; counting records
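
The same weekly sign-in record driven from redis-py, as a minimal sketch:

import redis

r = redis.Redis(decode_responses=True)

# One bit per day: offset = day of the week, value = signed in or not.
for day, signed_in in enumerate([1, 1, 1, 0, 0, 1, 1]):
    r.setbit("sign", day, signed_in)

print(r.getbit("sign", 3))  # 0: no sign-in on day 3
print(r.bitcount("sign"))   # 5: days signed in this week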

Transactions

1. Transactions explained

Atomicity: everything succeeds together, or everything fails together.

A single Redis command is atomic (mset, for example), but a Redis transaction does not guarantee atomicity.

The essence of a Redis transaction: a collection of commands. All commands in a transaction are serialized and executed in order.

Properties: one-shot, ordered, exclusive.

The execution flow of a Redis transaction:

1. Open the transaction.
2. Queue the commands.
3. Execute the transaction.

Differences from MySQL transactions:

Redis transactions have no notion of atomicity or isolation levels, which sets them apart from MySQL transactions.
A MySQL transaction has the four ACID properties: atomicity, consistency, isolation, durability.

Atomicity (Atomicity): the operations in a transaction are indivisible as far as the database is concerned; either all complete or none execute.
Consistency (Consistency): the result of several transactions running in parallel must match the result of running them serially in some order.
Isolation (Isolation): a transaction runs without interference from other transactions; when the database is accessed by multiple clients concurrently, their operations are isolated to prevent dirty reads, phantom reads and non-repeatable reads.
Durability (Durability): once a transaction commits, the system must guarantee its changes to the database are never lost.
To deal with dirty reads (one transaction reading another transaction's uncommitted data), non-repeatable reads (a transaction re-reading data that another transaction has committed in the meantime) and phantom reads (a transaction querying a whole table several times and seeing different row counts because other transactions inserted or deleted rows), four isolation levels are defined:

read uncommitted: no isolation at all; dirty reads, non-repeatable reads and phantom reads are all possible
read committed: prevents dirty reads, but not non-repeatable reads or phantom reads
repeatable read: prevents dirty reads and non-repeatable reads, but not phantom reads (MySQL's default level)
serializable: the database runs serialized; all of the above are prevented, at a severe cost in performance

Commands in a Redis transaction are not executed as they are entered; they only run once the exec command is issued.

multi // open a transaction
// everything below is queued
set key1 value1
set key2 value2
get key1
exec // execute the transaction

multi // open a transaction
// everything below is queued
set key1 value1
set key2 value2
get key1
DISCARD // abandon the transaction; none of the queued commands will run
// hands-on:
hadoop102:6379> multi
OK
hadoop102:6379> set k1 1
QUEUED
hadoop102:6379> set k2 2
QUEUED
hadoop102:6379> get k1
QUEUED
hadoop102:6379> set ke 3
QUEUED
hadoop102:6379> exec
1) OK
2) OK
3) "1"
4) OK // replies come back in queue order
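
The same queue-then-exec flow from client code, as a redis-py sketch; redis-py pipelines are transactional by default, so the queued commands are sent wrapped in MULTI/EXEC:

import redis

r = redis.Redis(decode_responses=True)

pipe = r.pipeline()   # transaction=True is the default
pipe.set("k1", 1)     # queued locally, like the QUEUED replies above
pipe.set("k2", 2)
pipe.get("k1")
print(pipe.execute())  # [True, True, '1'] -- replies come back in queue order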

================================================     
// if queuing a command raises a compile-time error (the command itself is malformed), none of the transaction's commands will execute
hadoop102:6379> multi
OK
hadoop102:6379> set key1 1
QUEUED
hadoop102:6379> set key2 2
QUEUED
hadoop102:6379> set key3 3
QUEUED
hadoop102:6379> getset key3 // this errors immediately
(error) ERR wrong number of arguments for 'getset' command
hadoop102:6379> set key4 4 // commands can still be queued after the error
QUEUED
hadoop102:6379> get key1
QUEUED
hadoop102:6379> exec // executing fails
(error) EXECABORT Transaction discarded because of previous errors.
hadoop102:6379> get key1 // key1 was never set
(nil)

================================================     
// runtime errors: if every command queues successfully but one of them fails logically when run, the failing command errors while all the others still execute
hadoop102:6379> set k1 hello
OK
hadoop102:6379> multi
OK
hadoop102:6379> incr k1 // a string cannot be incremented, so this will fail at runtime
QUEUED
hadoop102:6379> set key2 2
QUEUED
hadoop102:6379> exec
1) (error) ERR value is not an integer or out of range // fails at runtime
2) OK
hadoop102:6379> get key2 // the later set still succeeded
"2" 

================================================     
// Redis has no transaction rollback.

2. Optimistic locking

// Pessimistic locking: assume something can go wrong at any moment, so lock before every operation. Under this assumption, the relevant resources are locked up front so that nothing else can interfere while we work.

// Optimistic locking: assume nothing will go wrong, so never lock up front. Instead, read the value first, and when updating check whether the value is still what was read; if it is, nothing changed in between, so the update proceeds. Under this assumption no resources are locked beforehand, and if another operation did interfere, this operation is simply abandoned. Redis uses optimistic locking.

================================================ 
// the normal case
hadoop102:6379> set balance 10000 // initialize the balance
OK
hadoop102:6379> set consume 0 // initialize the amount spent
OK
hadoop102:6379> watch balance // start watching balance
OK
hadoop102:6379> multi // open a transaction
OK
hadoop102:6379> decrby balance 100 // decrease the balance by 100
QUEUED
hadoop102:6379> incrby consume 100 // increase the amount spent by 100
QUEUED
hadoop102:6379> exec // commit the transaction
1) (integer) 9900
2) (integer) 100

================================================     
// the abnormal case (open a second Redis client)
hadoop102:6379> watch balance // effectively takes an optimistic lock on balance
OK
hadoop102:6379> multi
OK
hadoop102:6379> decrby balance 100
QUEUED
hadoop102:6379> incrby consume 100
QUEUED
hadoop102:6379> decrby balance 100 // this step runs in the second client and modifies balance
(integer) 9800
hadoop102:6379> exec // back in the first client, exec runs after the second client's change; the transaction fails
(nil)
    
================================================ 
// unlock, then lock again
hadoop102:6379> unwatch // the transaction failed, so unwatch first, then watch again to re-arm the monitor
OK
hadoop102:6379> watch balance
OK
hadoop102:6379> multi
OK
hadoop102:6379> decrby balance 1000
QUEUED
hadoop102:6379> incrby consume 1000
QUEUED
hadoop102:6379> exec
1) (integer) 8800
2) (integer) 1100


exec // the steps above are identical to before, but if someone modifies balance between queuing and exec, the watch detects that balance has changed and the whole transaction fails
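
The whole watch/retry pattern from this section as a minimal redis-py sketch; if another client touches balance between watch and exec, execute() raises WatchError and the loop retries:

import redis

r = redis.Redis(decode_responses=True)
r.mset({"balance": 10000, "consume": 0})

with r.pipeline() as pipe:
    while True:
        try:
            pipe.watch("balance")        # optimistic lock on balance
            pipe.multi()                 # start queuing commands
            pipe.decrby("balance", 100)
            pipe.incrby("consume", 100)
            pipe.execute()               # fails if balance changed since watch
            break
        except redis.WatchError:
            continue                     # balance was modified; re-watch and retry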