From 017fec179b0ff94601a8948aba667d9106a1cc33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Siatkowski?= Date: Wed, 7 Aug 2019 16:58:52 +0200 Subject: [PATCH 1/4] - created common interface for single redis client and cluster classes - moved common implementation for cluster classes to traits (as in single client case) --- src/main/scala/com/redis/EvalOperations.scala | 46 +-- src/main/scala/com/redis/GeoOperations.scala | 135 +++----- src/main/scala/com/redis/HashOperations.scala | 98 ++---- .../com/redis/HyperLogLogOperations.scala | 18 +- src/main/scala/com/redis/ListOperations.scala | 72 ++-- src/main/scala/com/redis/NodeOperations.scala | 48 +-- src/main/scala/com/redis/Operations.scala | 187 ++++------- src/main/scala/com/redis/RedisClient.scala | 3 +- src/main/scala/com/redis/SetOperations.scala | 83 ++--- .../scala/com/redis/SortedSetOperations.scala | 100 ++---- .../scala/com/redis/StringOperations.scala | 106 ++---- src/main/scala/com/redis/api/BaseApi.scala | 170 ++++++++++ src/main/scala/com/redis/api/EvalApi.scala | 28 ++ src/main/scala/com/redis/api/GeoApi.scala | 102 ++++++ src/main/scala/com/redis/api/HashApi.scala | 84 +++++ .../scala/com/redis/api/HyperLogLogApi.scala | 19 ++ src/main/scala/com/redis/api/ListApi.scala | 82 +++++ src/main/scala/com/redis/api/NodeApi.scala | 42 +++ src/main/scala/com/redis/api/SetApi.scala | 99 ++++++ .../scala/com/redis/api/SortedSetApi.scala | 66 ++++ src/main/scala/com/redis/api/StringApi.scala | 113 +++++++ .../scala/com/redis/cluster/BaseOps.scala | 110 ++++++ .../scala/com/redis/cluster/HashOps.scala | 56 ++++ .../scala/com/redis/cluster/ListOps.scala | 57 ++++ .../scala/com/redis/cluster/NodeOps.scala | 31 ++ .../com/redis/cluster/RedisCluster.scala | 312 ++---------------- .../com/redis/cluster/RedisClusterOps.scala | 60 ++++ .../scala/com/redis/cluster/RedisShards.scala | 285 ++-------------- src/main/scala/com/redis/cluster/SetOps.scala | 58 ++++ .../com/redis/cluster/SortedSetOps.scala | 86 +++++ .../scala/com/redis/cluster/StringOps.scala | 84 +++++ .../com/redis/cluster/RedisClusterSpec.scala | 4 +- .../com/redis/cluster/RedisShardsSpec.scala | 10 +- 33 files changed, 1697 insertions(+), 1157 deletions(-) create mode 100644 src/main/scala/com/redis/api/BaseApi.scala create mode 100644 src/main/scala/com/redis/api/EvalApi.scala create mode 100644 src/main/scala/com/redis/api/GeoApi.scala create mode 100644 src/main/scala/com/redis/api/HashApi.scala create mode 100644 src/main/scala/com/redis/api/HyperLogLogApi.scala create mode 100644 src/main/scala/com/redis/api/ListApi.scala create mode 100644 src/main/scala/com/redis/api/NodeApi.scala create mode 100644 src/main/scala/com/redis/api/SetApi.scala create mode 100644 src/main/scala/com/redis/api/SortedSetApi.scala create mode 100644 src/main/scala/com/redis/api/StringApi.scala create mode 100644 src/main/scala/com/redis/cluster/BaseOps.scala create mode 100644 src/main/scala/com/redis/cluster/HashOps.scala create mode 100644 src/main/scala/com/redis/cluster/ListOps.scala create mode 100644 src/main/scala/com/redis/cluster/NodeOps.scala create mode 100644 src/main/scala/com/redis/cluster/RedisClusterOps.scala create mode 100644 src/main/scala/com/redis/cluster/SetOps.scala create mode 100644 src/main/scala/com/redis/cluster/SortedSetOps.scala create mode 100644 src/main/scala/com/redis/cluster/StringOps.scala diff --git a/src/main/scala/com/redis/EvalOperations.scala b/src/main/scala/com/redis/EvalOperations.scala index 84ae790c..0617fa23 100644 --- 
a/src/main/scala/com/redis/EvalOperations.scala +++ b/src/main/scala/com/redis/EvalOperations.scala @@ -1,50 +1,50 @@ package com.redis -import serialization._ +import com.redis.api.EvalApi +import com.redis.serialization._ -trait EvalOperations { self: Redis => +trait EvalOperations extends EvalApi { + self: Redis => - // EVAL - // evaluates lua code on the server. - def evalMultiBulk[A](luaCode: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = - send("EVAL", argsForEval(luaCode, keys, args))(asList[A]) + override def evalMultiBulk[A](luaCode: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = + send("EVAL", argsForEval(luaCode, keys, args))(asList[A]) - def evalBulk[A](luaCode: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[A] = + override def evalBulk[A](luaCode: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[A] = send("EVAL", argsForEval(luaCode, keys, args))(asBulk) - - def evalInt(luaCode: String, keys: List[Any], args: List[Any]): Option[Int] = + + override def evalInt(luaCode: String, keys: List[Any], args: List[Any]): Option[Int] = send("EVAL", argsForEval(luaCode, keys, args))(asInt) - - def evalMultiSHA[A](shahash: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = + + override def evalMultiSHA[A](shahash: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = send("EVALSHA", argsForEval(shahash, keys, args))(asList[A]) - - def evalSHA[A](shahash: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[A] = + + override def evalSHA[A](shahash: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[A] = send("EVALSHA", argsForEval(shahash, keys, args))(asAny.asInstanceOf[Option[A]]) - def evalSHABulk[A](shahash: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[A] = + override def evalSHABulk[A](shahash: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[A] = send("EVALSHA", argsForEval(shahash, keys, args))(asBulk) - - def scriptLoad(luaCode: String): Option[String] = { + + override def scriptLoad(luaCode: String): Option[String] = { send("SCRIPT", List("LOAD", luaCode))(asBulk) } - - def scriptExists(shahash: String): Option[Int] = { + + override def scriptExists(shahash: String): Option[Int] = { send("SCRIPT", List("EXISTS", shahash))(asList[String]) match { case Some(list) => { - if (list.size>0 && list(0).isDefined){ + if (list.size > 0 && list(0).isDefined) { Some(list(0).get.toInt) - }else{ + } else { None } } case None => None } } - - def scriptFlush: Option[String] = { + + override def scriptFlush: Option[String] = { send("SCRIPT", List("FLUSH"))(asString) } - + private def argsForEval(luaCode: String, keys: List[Any], args: List[Any]): List[Any] = luaCode :: keys.length :: keys ::: args } diff --git a/src/main/scala/com/redis/GeoOperations.scala b/src/main/scala/com/redis/GeoOperations.scala index fc761cbd..1cb98f13 100644 --- a/src/main/scala/com/redis/GeoOperations.scala +++ b/src/main/scala/com/redis/GeoOperations.scala @@ -1,122 +1,65 @@ package com.redis +import com.redis.api.GeoApi import com.redis.serialization._ -/** - * Created by alexis on 05/09/16. 
- */ -trait GeoOperations { self: Redis => +trait GeoOperations extends GeoApi { + self: Redis => private def flattenProduct3(in: Iterable[Product3[Any, Any, Any]]): List[Any] = in.iterator.flatMap(x => Iterator(x._1, x._2, x._3)).toList - /** - * Add the given members in the key geo sorted set - * @param key The geo sorted set - * @param members The members to be added. Format is (longitude, latitude, member) - * @return The number of elements added to the index. Repeated elements are not added. - */ - def geoadd(key: Any, members: Iterable[Product3[Any, Any, Any]]): Option[Int] = { + override def geoadd(key: Any, members: Iterable[Product3[Any, Any, Any]]): Option[Int] = { send("GEOADD", key :: flattenProduct3(members))(asInt) } - /** - * Retrieve the position of the members in the key geo sorted set. Note that if a member is not part of the set, None - * will be returned for this element. - * @param key - * @param members - * @param format - * @param parse - * @tparam A - * @return the coordinates of the input members in the same order. - */ - def geopos[A](key: Any, members: Iterable[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[List[Option[A]]]]] = { + override def geopos[A](key: Any, members: Iterable[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[List[Option[A]]]]] = { send("GEOPOS", key :: members.toList)(receive(multiBulkNested).map(_.map(_.map(_.map(_.map(parse)))))) } - /** - * Get the geohash for each member in the key geo index. - * @param key - * @param members - * @param format - * @param parse - * @tparam A - * @return The geohash of each queried member. - */ - def geohash[A](key: Any, members: Iterable[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]]= { + override def geohash[A](key: Any, members: Iterable[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = { send("GEOHASH", key :: members.toList)(asList[A]) } - def geodist(key: Any, m1: Any, m2: Any, unit: Option[Any]): Option[String] = { + override def geodist(key: Any, m1: Any, m2: Any, unit: Option[Any]): Option[String] = { send("GEODIST", List(key, m1, m2) ++ unit.toList)(asBulk[String]) } - /** - * Search for members around an origin point in the key geo sorted set - * @param key The geo index we are searching in - * @param longitude The base longitude for distance computation - * @param latitude The base latitude for distance computation - * @param radius The radius of the circle we want to search in - * @param unit The unit of the radius. Can be m (meters), km (kilometers), mi (miles), ft (feet) - * @param withCoord If true, the coordinate of the found members will be returned in the result - * @param withDist If true, the distance between the origin and the found members will be returned in the result - * @param withHash If true, the hash of the found members will be returned in the result - * @param count Max number of expected results - * @param sort The sorting strategy. If empty, order is not guaranteed. 
Can be ASC (ascending) or DESC (descending) - * @param store The Redis store we want to write the result in - * @param storeDist The redis storedist we want to write the result in - * @return The found members as GeoRadiusMember instances - */ - def georadius(key: Any, - longitude: Any, - latitude: Any, - radius: Any, - unit: Any, - withCoord: Boolean, - withDist: Boolean, - withHash: Boolean, - count: Option[Int], - sort: Option[Any], - store: Option[Any], - storeDist: Option[Any]): Option[List[Option[GeoRadiusMember]]] = { - val radArgs = List( if (withCoord) List("WITHCOORD") else Nil - , if (withDist) List("WITHDIST") else Nil - , if (withHash) List("WITHHASH") else Nil - , sort.fold[List[Any]](Nil)(b => List(b)) - , count.fold[List[Any]](Nil)(b => List("COUNT", b)) - , store.fold[List[Any]](Nil)(b => List("STORE", b)) - , storeDist.fold[List[Any]](Nil)(b => List("STOREDIST", b)) - ).flatten + override def georadius(key: Any, + longitude: Any, + latitude: Any, + radius: Any, + unit: Any, + withCoord: Boolean, + withDist: Boolean, + withHash: Boolean, + count: Option[Int], + sort: Option[Any], + store: Option[Any], + storeDist: Option[Any]): Option[List[Option[GeoRadiusMember]]] = { + val radArgs = List(if (withCoord) List("WITHCOORD") else Nil + , if (withDist) List("WITHDIST") else Nil + , if (withHash) List("WITHHASH") else Nil + , sort.fold[List[Any]](Nil)(b => List(b)) + , count.fold[List[Any]](Nil)(b => List("COUNT", b)) + , store.fold[List[Any]](Nil)(b => List("STORE", b)) + , storeDist.fold[List[Any]](Nil)(b => List("STOREDIST", b)) + ).flatten send("GEORADIUS", List(key, longitude, latitude, radius, unit) ++ radArgs)(receive(geoRadiusMemberReply)) } - /** - * Search for members around a specific memberin the key geo sorted set - * @param key The geo index we are searching in - * @param member The member we are searching around - * @param radius The radius of the circle we want to search in - * @param unit The unit of the radius. Can be m (meters), km (kilometers), mi (miles), ft (feet) - * @param withCoord If true, the coordinate of the found members will be returned in the result - * @param withDist If true, the distance between the origin and the found members will be returned in the result - * @param withHash If true, the hash of the found members will be returned in the result - * @param count Max number of expected results - * @param sort The sorting strategy. If empty, order is not guaranteed. 
Can be ASC (ascending) or DESC (descending) - * @param store The Redis store we want to write the result in - * @param storeDist The redis storedist we want to write the result in - * @return The found members as GeoRadiusMember instances - */ - def georadiusbymember[A](key: Any, - member: Any, - radius: Any, - unit: Any, - withCoord: Boolean, - withDist: Boolean, - withHash: Boolean, - count: Option[Int], - sort: Option[Any], - store: Option[Any], - storeDist: Option[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[GeoRadiusMember]]] = { - val radArgs = List( if (withCoord) List("WITHCOORD") else Nil + override def georadiusbymember[A](key: Any, + member: Any, + radius: Any, + unit: Any, + withCoord: Boolean, + withDist: Boolean, + withHash: Boolean, + count: Option[Int], + sort: Option[Any], + store: Option[Any], + storeDist: Option[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[GeoRadiusMember]]] = { + val radArgs = List(if (withCoord) List("WITHCOORD") else Nil , if (withDist) List("WITHDIST") else Nil , if (withHash) List("WITHHASH") else Nil , sort.fold[List[Any]](Nil)(b => List(b)) diff --git a/src/main/scala/com/redis/HashOperations.scala b/src/main/scala/com/redis/HashOperations.scala index 57f28bd7..b7633aa8 100644 --- a/src/main/scala/com/redis/HashOperations.scala +++ b/src/main/scala/com/redis/HashOperations.scala @@ -1,64 +1,27 @@ package com.redis -import serialization._ +import com.redis.api.HashApi +import com.redis.serialization._ -trait HashOperations { +trait HashOperations extends HashApi { self: Redis => - /** - * Sets field in the hash stored at key to value. - * If key does not exist, a new key holding a hash is created. - * If field already exists in the hash, it is overwritten. - * - * @see [[http://redis.io/commands/hset HSET documentation]] - * @deprecated return value semantics is inconsistent with [[com.redis.HashOperations#hsetnx]] and - * [[com.redis.HashOperations#hmset]]. Use [[com.redis.HashOperations#hset1]] instead - * @return True if field is a new field in the hash and value was set, - * False if field already exists in the hash and the value was updated. - * - */ - def hset(key: Any, field: Any, value: Any)(implicit format: Format): Boolean = - send("HSET", List(key, field, value))(asBoolean) - - /** Sets field in the hash stored at key to value. - * If key does not exist, a new key holding a hash is created. - * If field already exists in the hash, it is overwritten. - * - * @see [[http://redis.io/commands/hset HSET documentation]] - * @return Some(0) if field is a new field in the hash and value was set, - * Some(1) if field already exists in the hash and the value was updated. - */ - def hset1(key: Any, field: Any, value: Any)(implicit format: Format): Option[Long] = - send("HSET", List(key, field, value))(asLong) - - /** - * Sets field in the hash stored at key to value, only if field does not yet exist. - * If key does not exist, a new key holding a hash is created. - * If field already exists, this operation has no effect. - * - * @see [[http://redis.io/commands/hsetnx HSETNX documentation]] - * @return True if field is a new field in the hash and value was set. - * False if field exists in the hash and no operation was performed. 
- */ - def hsetnx(key: Any, field: Any, value: Any)(implicit format: Format): Boolean = - send("HSETNX", List(key, field, value))(asBoolean) - - def hget[A](key: Any, field: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + + override def hset(key: Any, field: Any, value: Any)(implicit format: Format): Boolean = + send("HSET", List(key, field, value))(asBoolean) + + override def hset1(key: Any, field: Any, value: Any)(implicit format: Format): Option[Long] = + send("HSET", List(key, field, value))(asLong) + + override def hsetnx(key: Any, field: Any, value: Any)(implicit format: Format): Boolean = + send("HSETNX", List(key, field, value))(asBoolean) + + override def hget[A](key: Any, field: Any)(implicit format: Format, parse: Parse[A]): Option[A] = send("HGET", List(key, field))(asBulk) - /** - * Sets the specified fields to their respective values in the hash stored at key. - * This command overwrites any existing fields in the hash. - * If key does not exist, a new key holding a hash is created. - * - * @param map from fields to values - * @see [[http://redis.io/commands/hmset HMSET documentation]] - * @return True if operation completed successfully, - * False otherwise. - */ - def hmset(key: Any, map: Iterable[Product2[Any, Any]])(implicit format: Format): Boolean = - send("HMSET", key :: flattenPairs(map))(asBoolean) - - def hmget[K, V](key: Any, fields: K*)(implicit format: Format, parseV: Parse[V]): Option[Map[K, V]] = + override def hmset(key: Any, map: Iterable[Product2[Any, Any]])(implicit format: Format): Boolean = + send("HMSET", key :: flattenPairs(map))(asBoolean) + + override def hmget[K, V](key: Any, fields: K*)(implicit format: Format, parseV: Parse[V]): Option[Map[K, V]] = send("HMGET", key :: fields.toList) { asList.map { values => fields.zip(values).flatMap { @@ -68,39 +31,36 @@ trait HashOperations { } } - def hincrby(key: Any, field: Any, value: Long)(implicit format: Format): Option[Long] = + override def hincrby(key: Any, field: Any, value: Long)(implicit format: Format): Option[Long] = send("HINCRBY", List(key, field, value))(asLong) - def hincrbyfloat(key: Any, field: Any, value: Float)(implicit format: Format): Option[Float] = + override def hincrbyfloat(key: Any, field: Any, value: Float)(implicit format: Format): Option[Float] = send("HINCRBYFLOAT", List(key, field, value))(asBulk.map(_.toFloat)) - def hexists(key: Any, field: Any)(implicit format: Format): Boolean = + override def hexists(key: Any, field: Any)(implicit format: Format): Boolean = send("HEXISTS", List(key, field))(asBoolean) - def hdel(key: Any, field: Any, fields: Any*)(implicit format: Format): Option[Long] = + override def hdel(key: Any, field: Any, fields: Any*)(implicit format: Format): Option[Long] = send("HDEL", List(key, field) ::: fields.toList)(asLong) - def hlen(key: Any)(implicit format: Format): Option[Long] = + override def hlen(key: Any)(implicit format: Format): Option[Long] = send("HLEN", List(key))(asLong) - def hkeys[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[List[A]] = + override def hkeys[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[List[A]] = send("HKEYS", List(key))(asList.map(_.flatten)) - def hvals[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[List[A]] = + override def hvals[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[List[A]] = send("HVALS", List(key))(asList.map(_.flatten)) - @deprecated("Use the more idiomatic variant hgetall1, which has the returned Map behavior more consistent. 
See issue https://github.com/debasishg/scala-redis/issues/122", "3.2") - def hgetall[K, V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[Map[K, V]] = + override def hgetall[K, V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[Map[K, V]] = send("HGETALL", List(key))(asListPairs[K, V].map(_.flatten.toMap)) - def hgetall1[K, V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[Map[K, V]] = + override def hgetall1[K, V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[Map[K, V]] = send("HGETALL", List(key))(asListPairs[K, V].map(_.flatten.toMap)) match { case s@Some(m) if m.nonEmpty => s case _ => None } - // HSCAN - // Incrementally iterate hash fields and associated values (since 2.8) - def hscan[A](key: Any, cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = - send("HSCAN", key :: cursor :: ((x: List[Any]) => if (pattern == "*") x else "match" :: pattern :: x) (if (count == 10) Nil else List("count", count)))(asPair) + override def hscan[A](key: Any, cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = + send("HSCAN", key :: cursor :: ((x: List[Any]) => if (pattern == "*") x else "match" :: pattern :: x) (if (count == 10) Nil else List("count", count)))(asPair) } diff --git a/src/main/scala/com/redis/HyperLogLogOperations.scala b/src/main/scala/com/redis/HyperLogLogOperations.scala index 49f65877..4b4fafef 100644 --- a/src/main/scala/com/redis/HyperLogLogOperations.scala +++ b/src/main/scala/com/redis/HyperLogLogOperations.scala @@ -1,20 +1,16 @@ package com.redis -import serialization._ +import com.redis.api.HyperLogLogApi -trait HyperLogLogOperations { self: Redis => - // PFADD (>= 2.8.9) - // Add a value to the hyperloglog - def pfadd(key: Any, value: Any, values: Any*): Option[Long] = +trait HyperLogLogOperations extends HyperLogLogApi { + self: Redis => + + override def pfadd(key: Any, value: Any, values: Any*): Option[Long] = send("PFADD", List(key, value) ::: values.toList)(asLong) - // PFCOUNT (>= 2.8.9) - // Get the estimated cardinality from one or more keys - def pfcount(keys: Any*): Option[Long] = + override def pfcount(keys: Any*): Option[Long] = send("PFCOUNT", keys.toList)(asLong) - // PFMERGE (>= 2.8.9) - // Merge existing keys - def pfmerge(destination: Any, sources: Any*): Boolean = + override def pfmerge(destination: Any, sources: Any*): Boolean = send("PFMERGE", List(destination) ::: sources.toList)(asBoolean) } diff --git a/src/main/scala/com/redis/ListOperations.scala b/src/main/scala/com/redis/ListOperations.scala index ef9fa4f3..b26726a2 100644 --- a/src/main/scala/com/redis/ListOperations.scala +++ b/src/main/scala/com/redis/ListOperations.scala @@ -1,84 +1,56 @@ package com.redis -import serialization._ +import com.redis.api.ListApi +import com.redis.serialization._ -trait ListOperations { self: Redis => +trait ListOperations extends ListApi { + self: Redis => - // LPUSH (Variadic: >= 2.4) - // add values to the head of the list stored at key - def lpush(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = + override def lpush(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = send("LPUSH", List(key, value) ::: values.toList)(asLong) - // LPUSHX (Variadic: >= 2.4) - // add value to the head of the list stored at key - 
def lpushx(key: Any, value: Any)(implicit format: Format): Option[Long] = + override def lpushx(key: Any, value: Any)(implicit format: Format): Option[Long] = send("LPUSHX", List(key, value))(asLong) - // RPUSH (Variadic: >= 2.4) - // add values to the tail of the list stored at key - def rpush(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = + override def rpush(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = send("RPUSH", List(key, value) ::: values.toList)(asLong) - // RPUSHX (Variadic: >= 2.4) - // add value to the tail of the list stored at key - def rpushx(key: Any, value: Any)(implicit format: Format): Option[Long] = + override def rpushx(key: Any, value: Any)(implicit format: Format): Option[Long] = send("RPUSHX", List(key, value))(asLong) - // LLEN - // return the length of the list stored at the specified key. - // If the key does not exist zero is returned (the same behaviour as for empty lists). - // If the value stored at key is not a list an error is returned. - def llen(key: Any)(implicit format: Format): Option[Long] = + override def llen(key: Any)(implicit format: Format): Option[Long] = send("LLEN", List(key))(asLong) - // LRANGE - // return the specified elements of the list stored at the specified key. - // Start and end are zero-based indexes. - def lrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = + override def lrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = send("LRANGE", List(key, start, end))(asList) - // LTRIM - // Trim an existing list so that it will contain only the specified range of elements specified. - def ltrim(key: Any, start: Int, end: Int)(implicit format: Format): Boolean = + override def ltrim(key: Any, start: Int, end: Int)(implicit format: Format): Boolean = send("LTRIM", List(key, start, end))(asBoolean) - // LINDEX - // return the especified element of the list stored at the specified key. - // Negative indexes are supported, for example -1 is the last element, -2 the penultimate and so on. - def lindex[A](key: Any, index: Int)(implicit format: Format, parse: Parse[A]): Option[A] = + override def lindex[A](key: Any, index: Int)(implicit format: Format, parse: Parse[A]): Option[A] = send("LINDEX", List(key, index))(asBulk) - // LSET - // set the list element at index with the new value. Out of range indexes will generate an error - def lset(key: Any, index: Int, value: Any)(implicit format: Format): Boolean = + override def lset(key: Any, index: Int, value: Any)(implicit format: Format): Boolean = send("LSET", List(key, index, value))(asBoolean) - // LREM - // Remove the first count occurrences of the value element from the list. 
- def lrem(key: Any, count: Int, value: Any)(implicit format: Format): Option[Long] = + override def lrem(key: Any, count: Int, value: Any)(implicit format: Format): Option[Long] = send("LREM", List(key, count, value))(asLong) - // LPOP - // atomically return and remove the first (LPOP) or last (RPOP) element of the list - def lpop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + override def lpop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = send("LPOP", List(key))(asBulk) - // RPOP - // atomically return and remove the first (LPOP) or last (RPOP) element of the list - def rpop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + override def rpop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = send("RPOP", List(key))(asBulk) - // RPOPLPUSH - // Remove the first count occurrences of the value element from the list. - def rpoplpush[A](srcKey: Any, dstKey: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + override def rpoplpush[A](srcKey: Any, dstKey: Any)(implicit format: Format, parse: Parse[A]): Option[A] = send("RPOPLPUSH", List(srcKey, dstKey))(asBulk) - def brpoplpush[A](srcKey: Any, dstKey: Any, timeoutInSeconds: Int)(implicit format: Format, parse: Parse[A]): Option[A] = + override def brpoplpush[A](srcKey: Any, dstKey: Any, timeoutInSeconds: Int)(implicit format: Format, parse: Parse[A]): Option[A] = send("BRPOPLPUSH", List(srcKey, dstKey, timeoutInSeconds))(asBulkWithTime) - def blpop[K,V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[(K,V)] = - send("BLPOP", key :: keys.foldRight(List[Any](timeoutInSeconds))(_ :: _))(asListPairs[K,V].flatMap(_.flatten.headOption)) + override def blpop[K, V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[(K, V)] = + send("BLPOP", key :: keys.foldRight(List[Any](timeoutInSeconds))(_ :: _))(asListPairs[K, V].flatMap(_.flatten.headOption)) - def brpop[K,V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[(K,V)] = - send("BRPOP", key :: keys.foldRight(List[Any](timeoutInSeconds))(_ :: _))(asListPairs[K,V].flatMap(_.flatten.headOption)) + override def brpop[K, V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[(K, V)] = + send("BRPOP", key :: keys.foldRight(List[Any](timeoutInSeconds))(_ :: _))(asListPairs[K, V].flatMap(_.flatten.headOption)) } diff --git a/src/main/scala/com/redis/NodeOperations.scala b/src/main/scala/com/redis/NodeOperations.scala index b980ddb0..66d6dd7d 100644 --- a/src/main/scala/com/redis/NodeOperations.scala +++ b/src/main/scala/com/redis/NodeOperations.scala @@ -1,52 +1,40 @@ package com.redis -import serialization._ +import com.redis.api.NodeApi -trait NodeOperations { self: Redis => +trait NodeOperations extends NodeApi { + self: Redis => - // SAVE - // save the DB on disk now. - def save: Boolean = + override def save: Boolean = send("SAVE")(asBoolean) - // BGSAVE - // save the DB in the background. - def bgsave: Boolean = + override def bgsave: Boolean = send("BGSAVE")(asBoolean) - // LASTSAVE - // return the UNIX TIME of the last DB SAVE executed with success. - def lastsave: Option[Long] = + override def lastsave: Option[Long] = send("LASTSAVE")(asLong) - - // SHUTDOWN - // Stop all the clients, save the DB, then quit the server. 
- def shutdown: Boolean = + + override def shutdown: Boolean = send("SHUTDOWN")(asBoolean) - // BGREWRITEAOF - def bgrewriteaof: Boolean = + override def bgrewriteaof: Boolean = send("BGREWRITEAOF")(asBoolean) - // INFO - // the info command returns different information and statistics about the server. - def info = + override def info: Option[String] = send("INFO")(asBulk) - - // MONITOR - // is a debugging command that outputs the whole sequence of commands received by the Redis server. - def monitor: Boolean = + + override def monitor: Boolean = send("MONITOR")(asBoolean) - - // SLAVEOF - // The SLAVEOF command can change the replication settings of a slave on the fly. - def slaveof(options: Any): Boolean = options match { + + override def slaveof(options: Any): Boolean = options match { case (h: String, p: Int) => send("SLAVEOF", List(h, p))(asBoolean) case _ => setAsMaster() } - - @deprecated("use slaveof", "1.2.0") def slaveOf(options: Any): Boolean = slaveof(options) + + @deprecated("use slaveof", "1.2.0") + def slaveOf(options: Any): Boolean = + slaveof(options) private def setAsMaster(): Boolean = send("SLAVEOF", List("NO", "ONE"))(asBoolean) diff --git a/src/main/scala/com/redis/Operations.scala b/src/main/scala/com/redis/Operations.scala index 45781cd2..c92860d1 100644 --- a/src/main/scala/com/redis/Operations.scala +++ b/src/main/scala/com/redis/Operations.scala @@ -1,198 +1,139 @@ package com.redis -import serialization._ - -trait Operations { self: Redis => - // SORT - // sort keys in a set, and optionally pull values for them - def sort[A](key:String, - limit:Option[(Int, Int)] = None, - desc:Boolean = false, - alpha:Boolean = false, - by:Option[String] = None, - get:List[String] = Nil)(implicit format:Format, parse:Parse[A]):Option[List[Option[A]]] = { - - val commands:List[Any] = makeSortArgs(key, limit, desc, alpha, by, get) +import com.redis.api.BaseApi +import com.redis.serialization._ + +trait Operations extends BaseApi { + self: Redis => + + override def sort[A](key: String, + limit: Option[(Int, Int)] = None, + desc: Boolean = false, + alpha: Boolean = false, + by: Option[String] = None, + get: List[String] = Nil)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = { + + val commands: List[Any] = makeSortArgs(key, limit, desc, alpha, by, get) send("SORT", commands)(asList) } - private def makeSortArgs(key:String, - limit:Option[(Int, Int)] = None, - desc:Boolean = false, - alpha:Boolean = false, - by:Option[String] = None, - get:List[String] = Nil): List[Any] = { + private def makeSortArgs(key: String, + limit: Option[(Int, Int)] = None, + desc: Boolean = false, + alpha: Boolean = false, + by: Option[String] = None, + get: List[String] = Nil): List[Any] = { List(List(key), limit.map(l => List("LIMIT", l._1, l._2)).getOrElse(Nil) , (if (desc) List("DESC") else Nil) , (if (alpha) List("ALPHA") else Nil) , by.map(b => List("BY", b)).getOrElse(Nil) , get.flatMap(g => List("GET", g)) - ).flatten + ).flatten } - // SORT with STORE - // sort keys in a set, and store result in the supplied key - def sortNStore[A](key:String, - limit:Option[(Int, Int)] = None, - desc:Boolean = false, - alpha:Boolean = false, - by:Option[String] = None, - get:List[String] = Nil, - storeAt: String)(implicit format:Format, parse:Parse[A]):Option[Long] = { + override def sortNStore[A](key: String, + limit: Option[(Int, Int)] = None, + desc: Boolean = false, + alpha: Boolean = false, + by: Option[String] = None, + get: List[String] = Nil, + storeAt: String)(implicit format: 
Format, parse: Parse[A]): Option[Long] = { val commands = makeSortArgs(key, limit, desc, alpha, by, get) ::: List("STORE", storeAt) send("SORT", commands)(asLong) } - // KEYS - // returns all the keys matching the glob-style pattern. - def keys[A](pattern: Any = "*")(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = + override def keys[A](pattern: Any = "*")(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = send("KEYS", List(pattern))(asList) - // TIME - // returns the current server time as a two items lists: - // a Unix timestamp and the amount of microseconds already elapsed in the current second. - def time[A](implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = + override def time[A](implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = send("TIME")(asList) - // RANDKEY - // return a randomly selected key from the currently selected DB. - @deprecated("use randomkey", "2.8") def randkey[A](implicit parse: Parse[A]): Option[A] = + @deprecated("use randomkey", "2.8") + def randkey[A](implicit parse: Parse[A]): Option[A] = send("RANDOMKEY")(asBulk) - // RANDOMKEY - // return a randomly selected key from the currently selected DB. - def randomkey[A](implicit parse: Parse[A]): Option[A] = + override def randomkey[A](implicit parse: Parse[A]): Option[A] = send("RANDOMKEY")(asBulk) - // RENAME (oldkey, newkey) - // atomically renames the key oldkey to newkey. - def rename(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = + override def rename(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = send("RENAME", List(oldkey, newkey))(asBoolean) - - // RENAMENX (oldkey, newkey) - // rename oldkey into newkey but fails if the destination key newkey already exists. - def renamenx(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = + + override def renamenx(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = send("RENAMENX", List(oldkey, newkey))(asBoolean) - - // DBSIZE - // return the size of the db. - def dbsize: Option[Long] = + + override def dbsize: Option[Long] = send("DBSIZE")(asLong) - // EXISTS (key) - // test if the specified key exists. - def exists(key: Any)(implicit format: Format): Boolean = + override def exists(key: Any)(implicit format: Format): Boolean = send("EXISTS", List(key))(asBoolean) - // DELETE (key1 key2 ..) - // deletes the specified keys. - def del(key: Any, keys: Any*)(implicit format: Format): Option[Long] = + override def del(key: Any, keys: Any*)(implicit format: Format): Option[Long] = send("DEL", key :: keys.toList)(asLong) - // TYPE (key) - // return the type of the value stored at key in form of a string. - def getType(key: Any)(implicit format: Format): Option[String] = + override def getType(key: Any)(implicit format: Format): Option[String] = send("TYPE", List(key))(asString) - // EXPIRE (key, expiry) - // sets the expire time (in sec.) for the specified key. - def expire(key: Any, ttl: Int)(implicit format: Format): Boolean = + override def expire(key: Any, ttl: Int)(implicit format: Format): Boolean = send("EXPIRE", List(key, ttl))(asBoolean) - // PEXPIRE (key, expiry) - // sets the expire time (in milli sec.) for the specified key. - def pexpire(key: Any, ttlInMillis: Int)(implicit format: Format): Boolean = + override def pexpire(key: Any, ttlInMillis: Int)(implicit format: Format): Boolean = send("PEXPIRE", List(key, ttlInMillis))(asBoolean) - // EXPIREAT (key, unix timestamp) - // sets the expire time for the specified key. 
- def expireat(key: Any, timestamp: Long)(implicit format: Format): Boolean = + override def expireat(key: Any, timestamp: Long)(implicit format: Format): Boolean = send("EXPIREAT", List(key, timestamp))(asBoolean) - // PEXPIREAT (key, unix timestamp) - // sets the expire timestamp in millis for the specified key. - def pexpireat(key: Any, timestampInMillis: Long)(implicit format: Format): Boolean = + override def pexpireat(key: Any, timestampInMillis: Long)(implicit format: Format): Boolean = send("PEXPIREAT", List(key, timestampInMillis))(asBoolean) - // TTL (key) - // returns the remaining time to live of a key that has a timeout - def ttl(key: Any)(implicit format: Format): Option[Long] = + override def ttl(key: Any)(implicit format: Format): Option[Long] = send("TTL", List(key))(asLong) - // PTTL (key) - // returns the remaining time to live of a key that has a timeout in millis - def pttl(key: Any)(implicit format: Format): Option[Long] = + override def pttl(key: Any)(implicit format: Format): Option[Long] = send("PTTL", List(key))(asLong) - // SELECT (index) - // selects the DB to connect, defaults to 0 (zero). - def select(index: Int): Boolean = + override def select(index: Int): Boolean = send("SELECT", List(index))(if (asBoolean) { db = index true } else { false }) - - - // FLUSHDB the DB - // removes all the DB data. - def flushdb: Boolean = + + override def flushdb: Boolean = send("FLUSHDB")(asBoolean) - // FLUSHALL the DB's - // removes data from all the DB's. - def flushall: Boolean = + override def flushall: Boolean = send("FLUSHALL")(asBoolean) - // MOVE - // Move the specified key from the currently selected DB to the specified destination DB. - def move(key: Any, db: Int)(implicit format: Format): Boolean = + override def move(key: Any, db: Int)(implicit format: Format): Boolean = send("MOVE", List(key, db))(asBoolean) - - // QUIT - // exits the server. - def quit: Boolean = + + override def quit: Boolean = send("QUIT")(disconnect) - - // AUTH - // auths with the server. - def auth(secret: Any)(implicit format: Format): Boolean = + + override def auth(secret: Any)(implicit format: Format): Boolean = send("AUTH", List(secret))(asBoolean) - // PERSIST (key) - // Remove the existing timeout on key, turning the key from volatile (a key with an expire set) - // to persistent (a key that will never expire as no timeout is associated). - def persist(key: Any)(implicit format: Format): Boolean = + override def persist(key: Any)(implicit format: Format): Boolean = send("PERSIST", List(key))(asBoolean) - // SCAN - // Incrementally iterate the keys space (since 2.8) - def scan[A](cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = - send("SCAN", cursor :: ((x: List[Any]) => if(pattern == "*") x else "match" :: pattern :: x)(if(count == 10) Nil else List("count", count)))(asPair) + override def scan[A](cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = + send("SCAN", cursor :: ((x: List[Any]) => if (pattern == "*") x else "match" :: pattern :: x) (if (count == 10) Nil else List("count", count)))(asPair) - // PING - def ping: Option[String] = send("PING")(asString) + override def ping: Option[String] = send("PING")(asString) - // WATCH (key1 key2 ..) - // Marks the given keys to be watched for conditional execution of a transaction. 
- // - def watch(key: Any, keys: Any*)(implicit format: Format): Boolean = + override def watch(key: Any, keys: Any*)(implicit format: Format): Boolean = send("WATCH", key :: keys.toList)(asBoolean) - // UNWATCH - // Flushes all the previously watched keys for a transaction - def unwatch(): Boolean = + override def unwatch(): Boolean = send("UNWATCH")(asBoolean) - // CONFIG GET - def getConfig(key: Any = "*")(implicit format: Format): Option[Map[String, Option[String]]] = + override def getConfig(key: Any = "*")(implicit format: Format): Option[Map[String, Option[String]]] = send("CONFIG", List("GET", key))(asList).map { ls => ls.grouped(2).collect { case Some(k) :: v :: Nil => k -> v }.toMap } - // CONFIG SET - def setConfig(key: Any, value: Any)(implicit format: Format): Option[String] = + override def setConfig(key: Any, value: Any)(implicit format: Format): Option[String] = send("CONFIG", List("SET", key, value))(asString) } diff --git a/src/main/scala/com/redis/RedisClient.scala b/src/main/scala/com/redis/RedisClient.scala index c63b9b46..f3bc02a5 100644 --- a/src/main/scala/com/redis/RedisClient.scala +++ b/src/main/scala/com/redis/RedisClient.scala @@ -61,7 +61,8 @@ trait Redis extends IO with Protocol { protected def initialize : Boolean } -trait RedisCommand extends Redis with Operations +trait RedisCommand extends Redis + with Operations with GeoOperations with NodeOperations with StringOperations diff --git a/src/main/scala/com/redis/SetOperations.scala b/src/main/scala/com/redis/SetOperations.scala index 1167bbf8..a3d71aa7 100644 --- a/src/main/scala/com/redis/SetOperations.scala +++ b/src/main/scala/com/redis/SetOperations.scala @@ -1,98 +1,59 @@ package com.redis -import serialization._ +import com.redis.api.SetApi +import com.redis.serialization._ -trait SetOperations { self: Redis => +trait SetOperations extends SetApi { + self: Redis => - // SADD (VARIADIC: >= 2.4) - // Add the specified members to the set value stored at key. - def sadd(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = + override def sadd(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = send("SADD", List(key, value) ::: values.toList)(asLong) - // SREM (VARIADIC: >= 2.4) - // Remove the specified members from the set value stored at key. - def srem(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = + override def srem(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = send("SREM", List(key, value) ::: values.toList)(asLong) - // SPOP - // Remove and return (pop) a random element from the Set value at key. - def spop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + override def spop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = send("SPOP", List(key))(asBulk) - // SPOP - // Remove and return multiple random elements (pop) from the Set value at key since (3.2). - def spop[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = + override def spop[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = send("SPOP", List(key, count))(asSet) - // SMOVE - // Move the specified member from one Set to another atomically. 
- def smove(sourceKey: Any, destKey: Any, value: Any)(implicit format: Format): Option[Long] = + override def smove(sourceKey: Any, destKey: Any, value: Any)(implicit format: Format): Option[Long] = send("SMOVE", List(sourceKey, destKey, value))(asLong) - // SCARD - // Return the number of elements (the cardinality) of the Set at key. - def scard(key: Any)(implicit format: Format): Option[Long] = + override def scard(key: Any)(implicit format: Format): Option[Long] = send("SCARD", List(key))(asLong) - // SISMEMBER - // Test if the specified value is a member of the Set at key. - def sismember(key: Any, value: Any)(implicit format: Format): Boolean = + override def sismember(key: Any, value: Any)(implicit format: Format): Boolean = send("SISMEMBER", List(key, value))(asBoolean) - // SINTER - // Return the intersection between the Sets stored at key1, key2, ..., keyN. - def sinter[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = + override def sinter[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = send("SINTER", key :: keys.toList)(asSet) - // SINTERSTORE - // Compute the intersection between the Sets stored at key1, key2, ..., keyN, - // and store the resulting Set at dstkey. - // SINTERSTORE returns the size of the intersection, unlike what the documentation says - // refer http://code.google.com/p/redis/issues/detail?id=121 - def sinterstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = + override def sinterstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = send("SINTERSTORE", key :: keys.toList)(asLong) - // SUNION - // Return the union between the Sets stored at key1, key2, ..., keyN. - def sunion[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = + override def sunion[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = send("SUNION", key :: keys.toList)(asSet) - // SUNIONSTORE - // Compute the union between the Sets stored at key1, key2, ..., keyN, - // and store the resulting Set at dstkey. - // SUNIONSTORE returns the size of the union, unlike what the documentation says - // refer http://code.google.com/p/redis/issues/detail?id=121 - def sunionstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = + override def sunionstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = send("SUNIONSTORE", key :: keys.toList)(asLong) - // SDIFF - // Return the difference between the Set stored at key1 and all the Sets key2, ..., keyN. - def sdiff[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = + override def sdiff[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = send("SDIFF", key :: keys.toList)(asSet) - // SDIFFSTORE - // Compute the difference between the Set key1 and all the Sets key2, ..., keyN, - // and store the resulting Set at dstkey. - def sdiffstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = + override def sdiffstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = send("SDIFFSTORE", key :: keys.toList)(asLong) - // SMEMBERS - // Return all the members of the Set value at key. 
- def smembers[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = + override def smembers[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = send("SMEMBERS", List(key))(asSet) - // SRANDMEMBER - // Return a random element from a Set - def srandmember[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + override def srandmember[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = send("SRANDMEMBER", List(key))(asBulk) - // SRANDMEMBER - // Return multiple random elements from a Set (since 2.6) - def srandmember[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = + override def srandmember[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = send("SRANDMEMBER", List(key, count))(asList) - // SSCAN - // Incrementally iterate Set elements (since 2.8) - def sscan[A](key: Any, cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = - send("SSCAN", key :: cursor :: ((x: List[Any]) => if(pattern == "*") x else "match" :: pattern :: x)(if(count == 10) Nil else List("count", count)))(asPair) + override def sscan[A](key: Any, cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = + send("SSCAN", key :: cursor :: ((x: List[Any]) => if (pattern == "*") x else "match" :: pattern :: x) (if (count == 10) Nil else List("count", count)))(asPair) } diff --git a/src/main/scala/com/redis/SortedSetOperations.scala b/src/main/scala/com/redis/SortedSetOperations.scala index e6fcaa3b..d30df9a5 100644 --- a/src/main/scala/com/redis/SortedSetOperations.scala +++ b/src/main/scala/com/redis/SortedSetOperations.scala @@ -1,47 +1,34 @@ package com.redis -import serialization._ +import com.redis.RedisClient._ +import com.redis.api.SortedSetApi +import com.redis.serialization._ -trait SortedSetOperations { +trait SortedSetOperations extends SortedSetApi { self: Redis => - // ZADD (Variadic: >= 2.4) - // Add the specified members having the specified score to the sorted set stored at key. - def zadd(key: Any, score: Double, member: Any, scoreVals: (Double, Any)*)(implicit format: Format): Option[Long] = + override def zadd(key: Any, score: Double, member: Any, scoreVals: (Double, Any)*)(implicit format: Format): Option[Long] = send("ZADD", List(key, score, member) ::: scoreVals.toList.flatMap(x => List(x._1, x._2)))(asLong) - // ZREM (Variadic: >= 2.4) - // Remove the specified members from the sorted set value stored at key. 
- def zrem(key: Any, member: Any, members: Any*)(implicit format: Format): Option[Long] = + override def zrem(key: Any, member: Any, members: Any*)(implicit format: Format): Option[Long] = send("ZREM", List(key, member) ::: members.toList)(asLong) - // ZINCRBY - // - def zincrby(key: Any, incr: Double, member: Any)(implicit format: Format): Option[Double] = + override def zincrby(key: Any, incr: Double, member: Any)(implicit format: Format): Option[Double] = send("ZINCRBY", List(key, incr, member))(asBulk(Parse.Implicits.parseDouble)) - // ZCARD - // - def zcard(key: Any)(implicit format: Format): Option[Long] = + override def zcard(key: Any)(implicit format: Format): Option[Long] = send("ZCARD", List(key))(asLong) - // ZSCORE - // - def zscore(key: Any, element: Any)(implicit format: Format): Option[Double] = + override def zscore(key: Any, element: Any)(implicit format: Format): Option[Double] = send("ZSCORE", List(key, element))(asBulk(Parse.Implicits.parseDouble)) - // ZRANGE - // - import Commands._ - import RedisClient._ - - def zrange[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[A]] = + override def zrange[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[A]] = send(if (sortAs == ASC) "ZRANGE" else "ZREVRANGE", List(key, start, end))(asList.map(_.flatten)) - def zrangeWithScore[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[(A, Double)]] = + override def zrangeWithScore[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[(A, Double)]] = send(if (sortAs == ASC) "ZRANGE" else "ZREVRANGE", List(key, start, end, "WITHSCORES"))(asListPairs(parse, Parse.Implicits.parseDouble).map(_.flatten)) - def zrangebylex[A](key: Any, min: String, max: String, limit: Option[(Int, Int)])(implicit format: Format, parse: Parse[A]): Option[List[A]] = { + override def zrangebylex[A](key: Any, min: String, max: String, limit: Option[(Int, Int)])(implicit format: Format, parse: Parse[A]): Option[List[A]] = { if (!limit.isEmpty) { val params = limit.toList.flatMap(l => List(key, min, max, "LIMIT", l._1, l._2)) send("ZRANGEBYLEX", params)(asList.map(_.flatten)) @@ -51,15 +38,13 @@ trait SortedSetOperations { } } - // ZRANGEBYSCORE - // - def zrangebyscore[A](key: Any, - min: Double = Double.NegativeInfinity, - minInclusive: Boolean = true, - max: Double = Double.PositiveInfinity, - maxInclusive: Boolean = true, - limit: Option[(Int, Int)], - sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[A]] = { + override def zrangebyscore[A](key: Any, + min: Double = Double.NegativeInfinity, + minInclusive: Boolean = true, + max: Double = Double.PositiveInfinity, + maxInclusive: Boolean = true, + limit: Option[(Int, Int)], + sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[A]] = { val (limitEntries, minParam, maxParam) = zrangebyScoreWithScoreInternal(min, minInclusive, max, maxInclusive, limit) @@ -71,13 +56,13 @@ trait SortedSetOperations { send(params._1, params._2)(asList.map(_.flatten)) } - def zrangebyscoreWithScore[A](key: Any, - min: Double = Double.NegativeInfinity, - minInclusive: Boolean = true, - max: Double = Double.PositiveInfinity, - maxInclusive: Boolean = true, - limit: Option[(Int, Int)], - sortAs: SortOrder = ASC)(implicit format: Format, parse: 
Parse[A]): Option[List[(A, Double)]] = { + override def zrangebyscoreWithScore[A](key: Any, + min: Double = Double.NegativeInfinity, + minInclusive: Boolean = true, + max: Double = Double.PositiveInfinity, + maxInclusive: Boolean = true, + limit: Option[(Int, Int)], + sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[(A, Double)]] = { val (limitEntries, minParam, maxParam) = zrangebyScoreWithScoreInternal(min, minInclusive, max, maxInclusive, limit) @@ -109,46 +94,31 @@ trait SortedSetOperations { (limitEntries, minParam, maxParam) } - // ZRANK - // ZREVRANK - // - def zrank(key: Any, member: Any, reverse: Boolean = false)(implicit format: Format): Option[Long] = + override def zrank(key: Any, member: Any, reverse: Boolean = false)(implicit format: Format): Option[Long] = send(if (reverse) "ZREVRANK" else "ZRANK", List(key, member))(asLong) - // ZREMRANGEBYRANK - // - def zremrangebyrank(key: Any, start: Int = 0, end: Int = -1)(implicit format: Format): Option[Long] = + override def zremrangebyrank(key: Any, start: Int = 0, end: Int = -1)(implicit format: Format): Option[Long] = send("ZREMRANGEBYRANK", List(key, start, end))(asLong) - // ZREMRANGEBYSCORE - // - def zremrangebyscore(key: Any, start: Double = Double.NegativeInfinity, end: Double = Double.PositiveInfinity)(implicit format: Format): Option[Long] = + override def zremrangebyscore(key: Any, start: Double = Double.NegativeInfinity, end: Double = Double.PositiveInfinity)(implicit format: Format): Option[Long] = send("ZREMRANGEBYSCORE", List(key, start, end))(asLong) - // ZUNION - // - def zunionstore(dstKey: Any, keys: Iterable[Any], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = + override def zunionstore(dstKey: Any, keys: Iterable[Any], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = send("ZUNIONSTORE", (Iterator(dstKey, keys.size) ++ keys.iterator ++ Iterator("AGGREGATE", aggregate)).toList)(asLong) - def zunionstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any, Double]], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = + override def zunionstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any, Double]], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = send("ZUNIONSTORE", (Iterator(dstKey, kws.size) ++ kws.iterator.map(_._1) ++ Iterator.single("WEIGHTS") ++ kws.iterator.map(_._2) ++ Iterator("AGGREGATE", aggregate)).toList)(asLong) - // ZINTERSTORE - // - def zinterstore(dstKey: Any, keys: Iterable[Any], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = + override def zinterstore(dstKey: Any, keys: Iterable[Any], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = send("ZINTERSTORE", (Iterator(dstKey, keys.size) ++ keys.iterator ++ Iterator("AGGREGATE", aggregate)).toList)(asLong) - def zinterstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any, Double]], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = + override def zinterstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any, Double]], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = send("ZINTERSTORE", (Iterator(dstKey, kws.size) ++ kws.iterator.map(_._1) ++ Iterator.single("WEIGHTS") ++ kws.iterator.map(_._2) ++ Iterator("AGGREGATE", aggregate)).toList)(asLong) - // ZCOUNT - // - def zcount(key: Any, min: Double = Double.NegativeInfinity, max: Double = Double.PositiveInfinity, minInclusive: Boolean = true, maxInclusive: Boolean = true)(implicit format: Format): Option[Long] = + 
override def zcount(key: Any, min: Double = Double.NegativeInfinity, max: Double = Double.PositiveInfinity, minInclusive: Boolean = true, maxInclusive: Boolean = true)(implicit format: Format): Option[Long] = send("ZCOUNT", List(key, Format.formatDouble(min, minInclusive), Format.formatDouble(max, maxInclusive)))(asLong) - // ZSCAN - // Incrementally iterate sorted sets elements and associated scores (since 2.8) - def zscan[A](key: Any, cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = + override def zscan[A](key: Any, cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = send("ZSCAN", key :: cursor :: ((x: List[Any]) => if (pattern == "*") x else "match" :: pattern :: x) (if (count == 10) Nil else List("count", count)))(asPair) } diff --git a/src/main/scala/com/redis/StringOperations.scala b/src/main/scala/com/redis/StringOperations.scala index 6b311905..00420337 100644 --- a/src/main/scala/com/redis/StringOperations.scala +++ b/src/main/scala/com/redis/StringOperations.scala @@ -1,27 +1,20 @@ package com.redis -import serialization._ +import com.redis.api.StringApi +import com.redis.serialization._ -trait StringOperations { self: Redis => +trait StringOperations extends StringApi { + self: Redis => - // SET KEY (key, value) - // sets the key with the specified value. - def set(key: Any, value: Any)(implicit format: Format): Boolean = + override def set(key: Any, value: Any)(implicit format: Format): Boolean = send("SET", List(key, value))(asBoolean) - // SET key value [EX seconds] [PX milliseconds] [NX|XX] - // set the string value of a key - // Starting with Redis 2.6.12 SET supports a set of options that modify its behavior: - // EX seconds -- Set the specified expire time, in seconds. - // PX milliseconds -- Set the specified expire time, in milliseconds. - // NX -- Only set the key if it does not already exist. - // XX -- Only set the key if it already exist. @deprecated("Use the more typesafe variant", "2.14") def set(key: Any, value: Any, nxxx: Any, expx: Any, time: Long): Boolean = { send("SET", List(key, value, nxxx, expx, time))(asBoolean) } - def set(key: Any, value: Any, onlyIfExists: Boolean, time: SecondsOrMillis): Boolean = { + override def set(key: Any, value: Any, onlyIfExists: Boolean, time: SecondsOrMillis): Boolean = { val nxxx = if (onlyIfExists) "XX" else "NX" val expx = time match { case Seconds(v) => List("EX", v) @@ -30,104 +23,67 @@ trait StringOperations { self: Redis => send("SET", List(key, value, nxxx) ++ expx)(asBoolean) } - // GET (key) - // gets the value for the specified key. - def get[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + override def get[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = send("GET", List(key))(asBulk) - // GETSET (key, value) - // is an atomic set this value and return the old value command. - def getset[A](key: Any, value: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + override def getset[A](key: Any, value: Any)(implicit format: Format, parse: Parse[A]): Option[A] = send("GETSET", List(key, value))(asBulk) - // SETNX (key, value) - // sets the value for the specified key, only if the key is not there. 
- def setnx(key: Any, value: Any)(implicit format: Format): Boolean = + override def setnx(key: Any, value: Any)(implicit format: Format): Boolean = send("SETNX", List(key, value))(asBoolean) - def setex(key: Any, expiry: Long, value: Any)(implicit format: Format): Boolean = + override def setex(key: Any, expiry: Long, value: Any)(implicit format: Format): Boolean = send("SETEX", List(key, expiry, value))(asBoolean) - def psetex(key: Any, expiryInMillis: Long, value: Any)(implicit format: Format): Boolean = + override def psetex(key: Any, expiryInMillis: Long, value: Any)(implicit format: Format): Boolean = send("PSETEX", List(key, expiryInMillis, value))(asBoolean) - // INCR (key) - // increments the specified key by 1 - def incr(key: Any)(implicit format: Format): Option[Long] = + override def incr(key: Any)(implicit format: Format): Option[Long] = send("INCR", List(key))(asLong) - // INCR (key, increment) - // increments the specified key by increment - def incrby(key: Any, increment: Long)(implicit format: Format): Option[Long] = + override def incrby(key: Any, increment: Long)(implicit format: Format): Option[Long] = send("INCRBY", List(key, increment))(asLong) - def incrbyfloat(key: Any, increment: Float)(implicit format: Format): Option[Float] = + override def incrbyfloat(key: Any, increment: Float)(implicit format: Format): Option[Float] = send("INCRBYFLOAT", List(key, increment))(asBulk.map(_.toFloat)) - - // DECR (key) - // decrements the specified key by 1 - def decr(key: Any)(implicit format: Format): Option[Long] = + + override def decr(key: Any)(implicit format: Format): Option[Long] = send("DECR", List(key))(asLong) - // DECR (key, increment) - // decrements the specified key by increment - def decrby(key: Any, increment: Long)(implicit format: Format): Option[Long] = + override def decrby(key: Any, increment: Long)(implicit format: Format): Option[Long] = send("DECRBY", List(key, increment))(asLong) - // MGET (key, key, key, ...) - // get the values of all the specified keys. - def mget[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = + override def mget[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = send("MGET", key :: keys.toList)(asList) - // MSET (key1 value1 key2 value2 ..) - // set the respective key value pairs. Overwrite value if key exists - def mset(kvs: (Any, Any)*)(implicit format: Format): Boolean = - send("MSET", kvs.foldRight(List[Any]()){ case ((k,v),l) => k :: v :: l })(asBoolean) + override def mset(kvs: (Any, Any)*)(implicit format: Format): Boolean = + send("MSET", kvs.foldRight(List[Any]()) { case ((k, v), l) => k :: v :: l })(asBoolean) - // MSETNX (key1 value1 key2 value2 ..) - // set the respective key value pairs. Noop if any key exists - def msetnx(kvs: (Any, Any)*)(implicit format: Format): Boolean = - send("MSETNX", kvs.foldRight(List[Any]()){ case ((k,v),l) => k :: v :: l })(asBoolean) + override def msetnx(kvs: (Any, Any)*)(implicit format: Format): Boolean = + send("MSETNX", kvs.foldRight(List[Any]()) { case ((k, v), l) => k :: v :: l })(asBoolean) - // SETRANGE key offset value - // Overwrites part of the string stored at key, starting at the specified offset, - // for the entire length of value. 
- def setrange(key: Any, offset: Int, value: Any)(implicit format: Format): Option[Long] = + override def setrange(key: Any, offset: Int, value: Any)(implicit format: Format): Option[Long] = send("SETRANGE", List(key, offset, value))(asLong) - // GETRANGE key start end - // Returns the substring of the string value stored at key, determined by the offsets - // start and end (both are inclusive). - def getrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]): Option[A] = + override def getrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]): Option[A] = send("GETRANGE", List(key, start, end))(asBulk) - // STRLEN key - // gets the length of the value associated with the key - def strlen(key: Any)(implicit format: Format): Option[Long] = + override def strlen(key: Any)(implicit format: Format): Option[Long] = send("STRLEN", List(key))(asLong) - // APPEND KEY (key, value) - // appends the key value with the specified value. - def append(key: Any, value: Any)(implicit format: Format): Option[Long] = + override def append(key: Any, value: Any)(implicit format: Format): Option[Long] = send("APPEND", List(key, value))(asLong) - // GETBIT key offset - // Returns the bit value at offset in the string value stored at key - def getbit(key: Any, offset: Int)(implicit format: Format): Option[Int] = + override def getbit(key: Any, offset: Int)(implicit format: Format): Option[Int] = send("GETBIT", List(key, offset))(asInt) - // SETBIT key offset value - // Sets or clears the bit at offset in the string value stored at key - def setbit(key: Any, offset: Int, value: Any)(implicit format: Format): Option[Int] = + override def setbit(key: Any, offset: Int, value: Any)(implicit format: Format): Option[Int] = send("SETBIT", List(key, offset, value))(asInt) - // BITOP op destKey srcKey... - // Perform a bitwise operation between multiple keys (containing string values) and store the result in the destination key. 
- def bitop(op: String, destKey: Any, srcKeys: Any*)(implicit format: Format): Option[Int] = + override def bitop(op: String, destKey: Any, srcKeys: Any*)(implicit format: Format): Option[Int] = send("BITOP", op :: destKey :: srcKeys.toList)(asInt) - // BITCOUNT key range - // Count the number of set bits in the given key within the optional range - def bitcount(key: Any, range: Option[(Int, Int)] = None)(implicit format: Format): Option[Int] = + override def bitcount(key: Any, range: Option[(Int, Int)] = None)(implicit format: Format): Option[Int] = send("BITCOUNT", List[Any](key) ++ (range.map { r => List[Any](r._1, r._2) } getOrElse List[Any]()))(asInt) + } diff --git a/src/main/scala/com/redis/api/BaseApi.scala b/src/main/scala/com/redis/api/BaseApi.scala new file mode 100644 index 00000000..d6d4200b --- /dev/null +++ b/src/main/scala/com/redis/api/BaseApi.scala @@ -0,0 +1,170 @@ +package com.redis.api + +import com.redis.serialization.{Format, Parse} + +trait BaseApi { + + /** + * sort keys in a set, and optionally pull values for them + */ + def sort[A](key: String, + limit: Option[(Int, Int)] = None, + desc: Boolean = false, + alpha: Boolean = false, + by: Option[String] = None, + get: List[String] = Nil)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] + + /** + * sort keys in a set, and stores result in the supplied key + */ + def sortNStore[A](key: String, + limit: Option[(Int, Int)] = None, + desc: Boolean = false, + alpha: Boolean = false, + by: Option[String] = None, + get: List[String] = Nil, + storeAt: String)(implicit format: Format, parse: Parse[A]): Option[Long] + + /** + * returns all the keys matching the glob-style pattern. + */ + def keys[A](pattern: Any = "*")(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] + + /** + * returns the current server time as a two items lists: + * a Unix timestamp and the amount of microseconds already elapsed in the current second. + */ + def time[A](implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] + + /** + * returns a randomly selected key from the currently selected DB. + */ + def randomkey[A](implicit parse: Parse[A]): Option[A] + + /** + * atomically renames the key oldkey to newkey. + */ + def rename(oldkey: Any, newkey: Any)(implicit format: Format): Boolean + + /** + * rename oldkey into newkey but fails if the destination key newkey already exists. + */ + def renamenx(oldkey: Any, newkey: Any)(implicit format: Format): Boolean + + /** + * returns the size of the db. + */ + def dbsize: Option[Long] + + /** + * test if the specified key exists. + */ + def exists(key: Any)(implicit format: Format): Boolean + + /** + * deletes the specified keys. + */ + def del(key: Any, keys: Any*)(implicit format: Format): Option[Long] + + /** + * returns the type of the value stored at key in form of a string. + */ + def getType(key: Any)(implicit format: Format): Option[String] + + /** + * sets the expire time (in sec.) for the specified key. + */ + def expire(key: Any, ttl: Int)(implicit format: Format): Boolean + + /** + * sets the expire time (in milli sec.) for the specified key. + */ + def pexpire(key: Any, ttlInMillis: Int)(implicit format: Format): Boolean + + /** + * sets the expire time for the specified key. + */ + def expireat(key: Any, timestamp: Long)(implicit format: Format): Boolean + + /** + * sets the expire timestamp in millis for the specified key. 
+ */ + def pexpireat(key: Any, timestampInMillis: Long)(implicit format: Format): Boolean + + /** + * returns the remaining time to live of a key that has a timeout + */ + def ttl(key: Any)(implicit format: Format): Option[Long] + + /** + * returns the remaining time to live of a key that has a timeout in millis + */ + def pttl(key: Any)(implicit format: Format): Option[Long] + + /** + * selects the DB to connect, defaults to 0 (zero). + */ + def select(index: Int): Boolean + + /** + * removes all the DB data. + */ + def flushdb: Boolean + + /** + * removes data from all the DB's. + */ + def flushall: Boolean + + /** + * Move the specified key from the currently selected DB to the specified destination DB. + */ + def move(key: Any, db: Int)(implicit format: Format): Boolean + + /** + * exits the server. + */ + def quit: Boolean + + /** + * auths with the server. + */ + def auth(secret: Any)(implicit format: Format): Boolean + + /** + * Remove the existing timeout on key, turning the key from volatile (a key with an expire set) + * to persistent (a key that will never expire as no timeout is associated). + */ + def persist(key: Any)(implicit format: Format): Boolean + + /** + * Incrementally iterate the keys space (since 2.8) + */ + def scan[A](cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] + + /** + * ping + */ + def ping: Option[String] + + /** + * Marks the given keys to be watched for conditional execution of a transaction. + */ + def watch(key: Any, keys: Any*)(implicit format: Format): Boolean + + /** + * Flushes all the previously watched keys for a transaction + */ + def unwatch(): Boolean + + /** + * CONFIG GET + */ + def getConfig(key: Any = "*")(implicit format: Format): Option[Map[String, Option[String]]] + + /** + * CONFIG SET + */ + def setConfig(key: Any, value: Any)(implicit format: Format): Option[String] + +} diff --git a/src/main/scala/com/redis/api/EvalApi.scala b/src/main/scala/com/redis/api/EvalApi.scala new file mode 100644 index 00000000..615e40d5 --- /dev/null +++ b/src/main/scala/com/redis/api/EvalApi.scala @@ -0,0 +1,28 @@ +package com.redis.api + +import com.redis.serialization.{Format, Parse} + +trait EvalApi { + + /** + * evaluates lua code on the server. 
+ */ + def evalMultiBulk[A](luaCode: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] + + def evalBulk[A](luaCode: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[A] + + def evalInt(luaCode: String, keys: List[Any], args: List[Any]): Option[Int] + + def evalMultiSHA[A](shahash: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] + + def evalSHA[A](shahash: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[A] + + def evalSHABulk[A](shahash: String, keys: List[Any], args: List[Any])(implicit format: Format, parse: Parse[A]): Option[A] + + def scriptLoad(luaCode: String): Option[String] + + def scriptExists(shahash: String): Option[Int] + + def scriptFlush: Option[String] + +} diff --git a/src/main/scala/com/redis/api/GeoApi.scala b/src/main/scala/com/redis/api/GeoApi.scala new file mode 100644 index 00000000..5de4ead3 --- /dev/null +++ b/src/main/scala/com/redis/api/GeoApi.scala @@ -0,0 +1,102 @@ +package com.redis.api + +import com.redis.GeoRadiusMember +import com.redis.serialization.{Format, Parse} + +trait GeoApi { + + /** + * Add the given members in the key geo sorted set + * + * @param key The geo sorted set + * @param members The members to be added. Format is (longitude, latitude, member) + * @return The number of elements added to the index. Repeated elements are not added. + */ + def geoadd(key: Any, members: Iterable[Product3[Any, Any, Any]]): Option[Int] + + /** + * Retrieve the position of the members in the key geo sorted set. Note that if a member is not part of the set, None + * will be returned for this element. + * + * @param key + * @param members + * @param format + * @param parse + * @tparam A + * @return the coordinates of the input members in the same order. + */ + def geopos[A](key: Any, members: Iterable[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[List[Option[A]]]]] + + /** + * Get the geohash for each member in the key geo index. + * + * @param key + * @param members + * @param format + * @param parse + * @tparam A + * @return The geohash of each queried member. + */ + def geohash[A](key: Any, members: Iterable[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] + + def geodist(key: Any, m1: Any, m2: Any, unit: Option[Any]): Option[String] + + /** + * Search for members around an origin point in the key geo sorted set + * + * @param key The geo index we are searching in + * @param longitude The base longitude for distance computation + * @param latitude The base latitude for distance computation + * @param radius The radius of the circle we want to search in + * @param unit The unit of the radius. Can be m (meters), km (kilometers), mi (miles), ft (feet) + * @param withCoord If true, the coordinate of the found members will be returned in the result + * @param withDist If true, the distance between the origin and the found members will be returned in the result + * @param withHash If true, the hash of the found members will be returned in the result + * @param count Max number of expected results + * @param sort The sorting strategy. If empty, order is not guaranteed. 
Can be ASC (ascending) or DESC (descending) + * @param store The Redis store we want to write the result in + * @param storeDist The redis storedist we want to write the result in + * @return The found members as GeoRadiusMember instances + */ + def georadius(key: Any, + longitude: Any, + latitude: Any, + radius: Any, + unit: Any, + withCoord: Boolean, + withDist: Boolean, + withHash: Boolean, + count: Option[Int], + sort: Option[Any], + store: Option[Any], + storeDist: Option[Any]): Option[List[Option[GeoRadiusMember]]] + + /** + * Search for members around a specific member in the key geo sorted set + * + * @param key The geo index we are searching in + * @param member The member we are searching around + * @param radius The radius of the circle we want to search in + * @param unit The unit of the radius. Can be m (meters), km (kilometers), mi (miles), ft (feet) + * @param withCoord If true, the coordinate of the found members will be returned in the result + * @param withDist If true, the distance between the origin and the found members will be returned in the result + * @param withHash If true, the hash of the found members will be returned in the result + * @param count Max number of expected results + * @param sort The sorting strategy. If empty, order is not guaranteed. Can be ASC (ascending) or DESC (descending) + * @param store The Redis store we want to write the result in + * @param storeDist The redis storedist we want to write the result in + * @return The found members as GeoRadiusMember instances + */ + def georadiusbymember[A](key: Any, + member: Any, + radius: Any, + unit: Any, + withCoord: Boolean, + withDist: Boolean, + withHash: Boolean, + count: Option[Int], + sort: Option[Any], + store: Option[Any], + storeDist: Option[Any])(implicit format: Format, parse: Parse[A]): Option[List[Option[GeoRadiusMember]]] + +} diff --git a/src/main/scala/com/redis/api/HashApi.scala b/src/main/scala/com/redis/api/HashApi.scala new file mode 100644 index 00000000..42722def --- /dev/null +++ b/src/main/scala/com/redis/api/HashApi.scala @@ -0,0 +1,84 @@ +package com.redis.api + +import com.redis.serialization.{Format, Parse} + +trait HashApi { + + /** + * Sets field in the hash stored at key to value. + * If key does not exist, a new key holding a hash is created. + * If field already exists in the hash, it is overwritten. + * + * @see [[http://redis.io/commands/hset HSET documentation]] + * @deprecated return value semantics is inconsistent with [[com.redis.HashOperations#hsetnx]] and + * [[com.redis.HashOperations#hmset]]. Use [[com.redis.HashOperations#hset1]] instead + * @return True if field is a new field in the hash and value was set, + * False if field already exists in the hash and the value was updated. + * + */ + def hset(key: Any, field: Any, value: Any)(implicit format: Format): Boolean + + /** + * Sets field in the hash stored at key to value. + * If key does not exist, a new key holding a hash is created. + * If field already exists in the hash, it is overwritten. + * + * @see [[http://redis.io/commands/hset HSET documentation]] + * @return Some(1) if field is a new field in the hash and value was set, + * Some(0) if field already exists in the hash and the value was updated. + */ + def hset1(key: Any, field: Any, value: Any)(implicit format: Format): Option[Long] + + /** + * Sets field in the hash stored at key to value, only if field does not yet exist. + * If key does not exist, a new key holding a hash is created.
+ * If field already exists, this operation has no effect. + * + * @see [[http://redis.io/commands/hsetnx HSETNX documentation]] + * @return True if field is a new field in the hash and value was set. + * False if field exists in the hash and no operation was performed. + */ + def hsetnx(key: Any, field: Any, value: Any)(implicit format: Format): Boolean + + def hget[A](key: Any, field: Any)(implicit format: Format, parse: Parse[A]): Option[A] + + /** + * Sets the specified fields to their respective values in the hash stored at key. + * This command overwrites any existing fields in the hash. + * If key does not exist, a new key holding a hash is created. + * + * @param map from fields to values + * @see [[http://redis.io/commands/hmset HMSET documentation]] + * @return True if operation completed successfully, + * False otherwise. + */ + def hmset(key: Any, map: Iterable[Product2[Any, Any]])(implicit format: Format): Boolean + + def hmget[K, V](key: Any, fields: K*)(implicit format: Format, parseV: Parse[V]): Option[Map[K, V]] + + def hincrby(key: Any, field: Any, value: Long)(implicit format: Format): Option[Long] + + def hincrbyfloat(key: Any, field: Any, value: Float)(implicit format: Format): Option[Float] + + def hexists(key: Any, field: Any)(implicit format: Format): Boolean + + def hdel(key: Any, field: Any, fields: Any*)(implicit format: Format): Option[Long] + + def hlen(key: Any)(implicit format: Format): Option[Long] + + def hkeys[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[List[A]] + + def hvals[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[List[A]] + + @deprecated("Use the more idiomatic variant hgetall1, which has the returned Map behavior more consistent. See issue https://github.com/debasishg/scala-redis/issues/122", "3.2") + def hgetall[K, V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[Map[K, V]] + + def hgetall1[K, V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[Map[K, V]] + + /** + * Incrementally iterate hash fields and associated values (since 2.8) + */ + def hscan[A](key: Any, cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] + + +} diff --git a/src/main/scala/com/redis/api/HyperLogLogApi.scala b/src/main/scala/com/redis/api/HyperLogLogApi.scala new file mode 100644 index 00000000..31962e80 --- /dev/null +++ b/src/main/scala/com/redis/api/HyperLogLogApi.scala @@ -0,0 +1,19 @@ +package com.redis.api + +trait HyperLogLogApi { + + /** + * Add a value to the hyperloglog (>= 2.8.9) + */ + def pfadd(key: Any, value: Any, values: Any*): Option[Long] + + /** + * Get the estimated cardinality from one or more keys (>= 2.8.9) + */ + def pfcount(keys: Any*): Option[Long] + + /** + * Merge existing keys (>= 2.8.9) + */ + def pfmerge(destination: Any, sources: Any*): Boolean +} \ No newline at end of file diff --git a/src/main/scala/com/redis/api/ListApi.scala b/src/main/scala/com/redis/api/ListApi.scala new file mode 100644 index 00000000..3b242a29 --- /dev/null +++ b/src/main/scala/com/redis/api/ListApi.scala @@ -0,0 +1,82 @@ +package com.redis.api + +import com.redis.serialization.{Format, Parse} + +trait ListApi { + + /** + * add values to the head of the list stored at key (Variadic: >= 2.4) + */ + def lpush(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] + + /** + * add value to the head of the list stored at key (Variadic: >= 2.4) + */ + def lpushx(key: 
Any, value: Any)(implicit format: Format): Option[Long] + + /** + * add values to the tail of the list stored at key (Variadic: >= 2.4) + */ + def rpush(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] + + /** + * add value to the tail of the list stored at key (Variadic: >= 2.4) + */ + def rpushx(key: Any, value: Any)(implicit format: Format): Option[Long] + + /** + * return the length of the list stored at the specified key. + * If the key does not exist zero is returned (the same behaviour as for empty lists). + * If the value stored at key is not a list an error is returned. + */ + def llen(key: Any)(implicit format: Format): Option[Long] + + /** + * return the specified elements of the list stored at the specified key. + * Start and end are zero-based indexes. + */ + def lrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] + + /** + * Trim an existing list so that it will contain only the specified range of elements. + */ + def ltrim(key: Any, start: Int, end: Int)(implicit format: Format): Boolean + + /** + * return the specified element of the list stored at the specified key. + * Negative indexes are supported, for example -1 is the last element, -2 the penultimate and so on. + */ + def lindex[A](key: Any, index: Int)(implicit format: Format, parse: Parse[A]): Option[A] + + /** + * set the list element at index with the new value. Out of range indexes will generate an error + */ + def lset(key: Any, index: Int, value: Any)(implicit format: Format): Boolean + + /** + * Remove the first count occurrences of the value element from the list. + */ + def lrem(key: Any, count: Int, value: Any)(implicit format: Format): Option[Long] + + /** + * atomically return and remove the first (LPOP) or last (RPOP) element of the list + */ + def lpop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] + + /** + * atomically return and remove the first (LPOP) or last (RPOP) element of the list + */ + def rpop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] + + /** + * atomically return and remove the last element of the list at srcKey, and push it to the head of the list at dstKey + */ + def rpoplpush[A](srcKey: Any, dstKey: Any)(implicit format: Format, parse: Parse[A]): Option[A] + + def brpoplpush[A](srcKey: Any, dstKey: Any, timeoutInSeconds: Int)(implicit format: Format, parse: Parse[A]): Option[A] + + def blpop[K, V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[(K, V)] + + def brpop[K, V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[(K, V)] + +} \ No newline at end of file diff --git a/src/main/scala/com/redis/api/NodeApi.scala b/src/main/scala/com/redis/api/NodeApi.scala new file mode 100644 index 00000000..23e25d0f --- /dev/null +++ b/src/main/scala/com/redis/api/NodeApi.scala @@ -0,0 +1,42 @@ +package com.redis.api + +trait NodeApi { + + /** + * save the DB on disk now. + */ + def save: Boolean + + /** + * save the DB in the background. + */ + def bgsave: Boolean + + /** + * return the UNIX TIME of the last DB SAVE executed with success. + */ + def lastsave: Option[Long] + + /** + * Stop all the clients, save the DB, then quit the server. + */ + def shutdown: Boolean + + def bgrewriteaof: Boolean + + /** + * The info command returns different information and statistics about the server.
+ */ + def info: Option[String] + + /** + * is a debugging command that outputs the whole sequence of commands received by the Redis server. + */ + def monitor: Boolean + + /** + * The SLAVEOF command can change the replication settings of a slave on the fly. + */ + def slaveof(options: Any): Boolean + +} diff --git a/src/main/scala/com/redis/api/SetApi.scala b/src/main/scala/com/redis/api/SetApi.scala new file mode 100644 index 00000000..342c750a --- /dev/null +++ b/src/main/scala/com/redis/api/SetApi.scala @@ -0,0 +1,99 @@ +package com.redis.api + +import com.redis.serialization.{Format, Parse} + +trait SetApi { + + /** + * Add the specified members to the set value stored at key. (VARIADIC: >= 2.4) + */ + def sadd(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] + + /** + * Remove the specified members from the set value stored at key. (VARIADIC: >= 2.4) + */ + def srem(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] + + /** + * Remove and return (pop) a random element from the Set value at key. + */ + def spop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] + + /** + * Remove and return multiple random elements (pop) from the Set value at key since (3.2). + */ + def spop[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] + + /** + * Move the specified member from one Set to another atomically. + */ + def smove(sourceKey: Any, destKey: Any, value: Any)(implicit format: Format): Option[Long] + + /** + * Return the number of elements (the cardinality) of the Set at key. + */ + def scard(key: Any)(implicit format: Format): Option[Long] + + /** + * Test if the specified value is a member of the Set at key. + */ + def sismember(key: Any, value: Any)(implicit format: Format): Boolean + + /** + * Return the intersection between the Sets stored at key1, key2, ..., keyN. + */ + def sinter[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] + + /** + * Compute the intersection between the Sets stored at key1, key2, ..., keyN, + * and store the resulting Set at dstkey. + * SINTERSTORE returns the size of the intersection, unlike what the documentation says + * refer http://code.google.com/p/redis/issues/detail?id=121 + */ + def sinterstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] + + /** + * Return the union between the Sets stored at key1, key2, ..., keyN. + */ + def sunion[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] + + /** + * Compute the union between the Sets stored at key1, key2, ..., keyN, + * and store the resulting Set at dstkey. + * SUNIONSTORE returns the size of the union, unlike what the documentation says + * refer http://code.google.com/p/redis/issues/detail?id=121 + */ + def sunionstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] + + /** + * Return the difference between the Set stored at key1 and all the Sets key2, ..., keyN. + */ + def sdiff[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] + + /** + * Compute the difference between the Set key1 and all the Sets key2, ..., keyN, + * and store the resulting Set at dstkey. + */ + def sdiffstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] + + /** + * Return all the members of the Set value at key. 
+ */ + def smembers[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] + + /** + * Return a random element from a Set + */ + def srandmember[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] + + /** + * Return multiple random elements from a Set (since 2.6) + */ + def srandmember[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] + + /** + * Incrementally iterate Set elements (since 2.8) + */ + def sscan[A](key: Any, cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] + +} diff --git a/src/main/scala/com/redis/api/SortedSetApi.scala b/src/main/scala/com/redis/api/SortedSetApi.scala new file mode 100644 index 00000000..d33c7175 --- /dev/null +++ b/src/main/scala/com/redis/api/SortedSetApi.scala @@ -0,0 +1,66 @@ +package com.redis.api + +import com.redis.RedisClient.{ASC, Aggregate, SUM, SortOrder} +import com.redis.serialization.{Format, Parse} + +trait SortedSetApi { + + /** + * Add the specified members having the specified score to the sorted set stored at key. (Variadic: >= 2.4) + */ + def zadd(key: Any, score: Double, member: Any, scoreVals: (Double, Any)*)(implicit format: Format): Option[Long] + + /** + * Remove the specified members from the sorted set value stored at key. (Variadic: >= 2.4) + */ + def zrem(key: Any, member: Any, members: Any*)(implicit format: Format): Option[Long] + + def zincrby(key: Any, incr: Double, member: Any)(implicit format: Format): Option[Double] + + def zcard(key: Any)(implicit format: Format): Option[Long] + + def zscore(key: Any, element: Any)(implicit format: Format): Option[Double] + + def zrange[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[A]] + + def zrangeWithScore[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[(A, Double)]] + + def zrangebylex[A](key: Any, min: String, max: String, limit: Option[(Int, Int)])(implicit format: Format, parse: Parse[A]): Option[List[A]] + + def zrangebyscore[A](key: Any, + min: Double = Double.NegativeInfinity, + minInclusive: Boolean = true, + max: Double = Double.PositiveInfinity, + maxInclusive: Boolean = true, + limit: Option[(Int, Int)], + sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[A]] + + def zrangebyscoreWithScore[A](key: Any, + min: Double = Double.NegativeInfinity, + minInclusive: Boolean = true, + max: Double = Double.PositiveInfinity, + maxInclusive: Boolean = true, + limit: Option[(Int, Int)], + sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[(A, Double)]] + + def zrank(key: Any, member: Any, reverse: Boolean = false)(implicit format: Format): Option[Long] + + def zremrangebyrank(key: Any, start: Int = 0, end: Int = -1)(implicit format: Format): Option[Long] + + def zremrangebyscore(key: Any, start: Double = Double.NegativeInfinity, end: Double = Double.PositiveInfinity)(implicit format: Format): Option[Long] + + def zunionstore(dstKey: Any, keys: Iterable[Any], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] + + def zunionstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any, Double]], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] + + def zinterstore(dstKey: Any, keys: Iterable[Any], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] + + def 
zinterstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any, Double]], aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] + + def zcount(key: Any, min: Double = Double.NegativeInfinity, max: Double = Double.PositiveInfinity, minInclusive: Boolean = true, maxInclusive: Boolean = true)(implicit format: Format): Option[Long] + + /** + * Incrementally iterate sorted sets elements and associated scores (since 2.8) + */ + def zscan[A](key: Any, cursor: Int, pattern: Any = "*", count: Int = 10)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] +} diff --git a/src/main/scala/com/redis/api/StringApi.scala b/src/main/scala/com/redis/api/StringApi.scala new file mode 100644 index 00000000..e206e54c --- /dev/null +++ b/src/main/scala/com/redis/api/StringApi.scala @@ -0,0 +1,113 @@ +package com.redis.api + +import com.redis.SecondsOrMillis +import com.redis.serialization.{Format, Parse} + +trait StringApi { + + /** + * sets the key with the specified value. + */ + def set(key: Any, value: Any)(implicit format: Format): Boolean + + def set(key: Any, value: Any, onlyIfExists: Boolean, time: SecondsOrMillis): Boolean + + /** + * gets the value for the specified key. + */ + def get[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] + + /** + * is an atomic set this value and return the old value command. + */ + def getset[A](key: Any, value: Any)(implicit format: Format, parse: Parse[A]): Option[A] + + /** + * sets the value for the specified key, only if the key is not there. + */ + def setnx(key: Any, value: Any)(implicit format: Format): Boolean + + def setex(key: Any, expiry: Long, value: Any)(implicit format: Format): Boolean + + def psetex(key: Any, expiryInMillis: Long, value: Any)(implicit format: Format): Boolean + + /** + * increments the specified key by 1 + */ + def incr(key: Any)(implicit format: Format): Option[Long] + + /** + * increments the specified key by increment + */ + def incrby(key: Any, increment: Long)(implicit format: Format): Option[Long] + + def incrbyfloat(key: Any, increment: Float)(implicit format: Format): Option[Float] + + /** + * decrements the specified key by 1 + */ + def decr(key: Any)(implicit format: Format): Option[Long] + + /** + * decrements the specified key by increment + */ + def decrby(key: Any, increment: Long)(implicit format: Format): Option[Long] + + /** + * get the values of all the specified keys. + */ + def mget[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] + + /** + * set the respective key value pairs. Overwrite value if key exists + */ + def mset(kvs: (Any, Any)*)(implicit format: Format): Boolean + + /** + * set the respective key value pairs. Noop if any key exists + */ + def msetnx(kvs: (Any, Any)*)(implicit format: Format): Boolean + + /** + * SETRANGE key offset value + * Overwrites part of the string stored at key, starting at the specified offset, + * for the entire length of value. + */ + def setrange(key: Any, offset: Int, value: Any)(implicit format: Format): Option[Long] + + /** + * Returns the substring of the string value stored at key, determined by the offsets + * start and end (both are inclusive). + */ + def getrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]): Option[A] + + /** + * gets the length of the value associated with the key + */ + def strlen(key: Any)(implicit format: Format): Option[Long] + + /** + * appends the key value with the specified value. 
+ */ + def append(key: Any, value: Any)(implicit format: Format): Option[Long] + + /** + * Returns the bit value at offset in the string value stored at key + */ + def getbit(key: Any, offset: Int)(implicit format: Format): Option[Int] + + /** + * Sets or clears the bit at offset in the string value stored at key + */ + def setbit(key: Any, offset: Int, value: Any)(implicit format: Format): Option[Int] + + /** + * Perform a bitwise operation between multiple keys (containing string values) and store the result in the destination key. + */ + def bitop(op: String, destKey: Any, srcKeys: Any*)(implicit format: Format): Option[Int] + + /** + * Count the number of set bits in the given key within the optional range + */ + def bitcount(key: Any, range: Option[(Int, Int)] = None)(implicit format: Format): Option[Int] +} diff --git a/src/main/scala/com/redis/cluster/BaseOps.scala b/src/main/scala/com/redis/cluster/BaseOps.scala new file mode 100644 index 00000000..3da7c62f --- /dev/null +++ b/src/main/scala/com/redis/cluster/BaseOps.scala @@ -0,0 +1,110 @@ +package com.redis.cluster + +import com.redis.api.BaseApi +import com.redis.serialization.{Format, Parse} + +trait BaseOps extends BaseApi { + rc: RedisClusterOps => + + override def keys[A](pattern: Any)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = Some { + onAllConns(_.keys[A](pattern)) + .foldLeft(List.empty[Option[A]]) { + case (acc, el) => el match { + case Some(x) => x ::: acc + case None => acc + } + } + } + + override def rename(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = + processForKey(oldkey)(_.rename(oldkey, newkey)) + + override def renamenx(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = + processForKey(oldkey)(_.renamenx(oldkey, newkey)) + + override def dbsize: Option[Long] = { + val r = onAllConns(_.dbsize).flatten + if (r.isEmpty) None else Some(r.sum) + } + + override def exists(key: Any)(implicit format: Format): Boolean = + processForKey(key)(_.exists(key)) + + override def del(key: Any, keys: Any*)(implicit format: Format): Option[Long] = { + val r = (key :: keys.toList).groupBy(nodeForKey) + .flatMap { case (r, keys) => r.withClient(_.del(keys.head, keys.tail)) } + if (r.isEmpty) None else Some(r.sum) + } + + override def getType(key: Any)(implicit format: Format): Option[String] = + processForKey(key)(_.getType(key)) + + override def expire(key: Any, ttl: Int)(implicit format: Format): Boolean = + processForKey(key)(_.expire(key, ttl)) + + override def pexpire(key: Any, ttlInMillis: Int)(implicit format: Format): Boolean = + processForKey(key)(_.pexpire(key, ttlInMillis)) + + override def expireat(key: Any, timestamp: Long)(implicit format: Format): Boolean = + processForKey(key)(_.expireat(key, timestamp)) + + override def pexpireat(key: Any, timestampInMillis: Long)(implicit format: Format): Boolean = + processForKey(key)(_.pexpireat(key, timestampInMillis)) + + override def ttl(key: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.ttl(key)) + + override def pttl(key: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.pttl(key)) + + override def flushdb: Boolean = + onAllConns(_.flushdb) forall (_ == true) + + override def flushall: Boolean = + onAllConns(_.flushall) forall (_ == true) + + override def quit: Boolean = + onAllConns(_.quit) forall (_ == true) + + // todo: implement + override def time[A](implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = ??? 
+ + // todo: implement + override def randomkey[A](implicit parse: Parse[A]): Option[A] = ??? + + // todo: implement + override def select(index: Int): Boolean = ??? + + // todo: implement + override def move(key: Any, db: Int)(implicit format: Format): Boolean = ??? + + // todo: implement + override def auth(secret: Any)(implicit format: Format): Boolean = ??? + + // todo: implement + override def persist(key: Any)(implicit format: Format): Boolean = ??? + + // todo: implement + override def scan[A](cursor: Int, pattern: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = ??? + + // todo: implement + override def ping: Option[String] = ??? + + // todo: implement + override def watch(key: Any, keys: Any*)(implicit format: Format): Boolean = ??? + + // todo: implement + override def unwatch(): Boolean = ??? + + // todo: implement + override def getConfig(key: Any)(implicit format: Format): Option[Map[String, Option[String]]] = ??? + + // todo: implement + override def setConfig(key: Any, value: Any)(implicit format: Format): Option[String] = ??? + + // todo: implement + override def sort[A](key: String, limit: Option[(Int, Int)], desc: Boolean, alpha: Boolean, by: Option[String], get: List[String])(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = ??? + + // todo: implement + override def sortNStore[A](key: String, limit: Option[(Int, Int)], desc: Boolean, alpha: Boolean, by: Option[String], get: List[String], storeAt: String)(implicit format: Format, parse: Parse[A]): Option[Long] = ??? +} diff --git a/src/main/scala/com/redis/cluster/HashOps.scala b/src/main/scala/com/redis/cluster/HashOps.scala new file mode 100644 index 00000000..48dcdd8b --- /dev/null +++ b/src/main/scala/com/redis/cluster/HashOps.scala @@ -0,0 +1,56 @@ +package com.redis.cluster + +import com.redis.api.HashApi +import com.redis.serialization.{Format, Parse} + +trait HashOps extends HashApi { + rc: RedisClusterOps => + + override def hset(key: Any, field: Any, value: Any)(implicit format: Format): Boolean = + processForKey(key)(_.hset(key, field, value)) + + override def hset1(key: Any, field: Any, value: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.hset1(key, field, value)) + + override def hget[A](key: Any, field: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + processForKey(key)(_.hget[A](key, field)) + + override def hmset(key: Any, map: Iterable[Product2[Any, Any]])(implicit format: Format): Boolean = + processForKey(key)(_.hmset(key, map)) + + override def hmget[K, V](key: Any, fields: K*)(implicit format: Format, parseV: Parse[V]): Option[Map[K, V]] = + processForKey(key)(_.hmget[K, V](key, fields: _*)) + + override def hincrby(key: Any, field: Any, value: Long)(implicit format: Format): Option[Long] = + processForKey(key)(_.hincrby(key, field, value)) + + override def hexists(key: Any, field: Any)(implicit format: Format): Boolean = + processForKey(key)(_.hexists(key, field)) + + override def hdel(key: Any, field: Any, fields: Any*)(implicit format: Format): Option[Long] = + processForKey(key)(_.hdel(key, field, fields: _*)) + + override def hlen(key: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.hlen(key)) + + override def hkeys[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[List[A]] = + processForKey(key)(_.hkeys[A](key)) + + override def hvals[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[List[A]] = + processForKey(key)(_.hvals[A](key)) + + 
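Every hash command delegated above is a single-key operation, so the cluster can expose the same HashApi contract that the single-client HashOperations trait now implements. That is the main benefit of the new com.redis.api package: application code can be written once against the trait and handed either a RedisClient or a RedisCluster. A minimal, hypothetical usage sketch (the object name and key layout are invented, and it assumes the library's default Format and Parse[String] implicits resolve as they do for the single client):

    import com.redis.api.HashApi

    object HashApiUsage {
      // compiles against the interface only, so any HashApi implementation can be passed in
      def profileOf(redis: HashApi, userId: Long): Option[Map[String, String]] =
        redis.hgetall1[String, String]("user:" + userId + ":profile")
    }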
override def hgetall[K, V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[Map[K, V]] = + processForKey(key)(_.hgetall[K, V](key)) + + override def hgetall1[K, V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[Map[K, V]] = + processForKey(key)(_.hgetall1[K, V](key)) + + // todo: implement + override def hsetnx(key: Any, field: Any, value: Any)(implicit format: Format): Boolean = ??? + + // todo: implement + override def hincrbyfloat(key: Any, field: Any, value: Float)(implicit format: Format): Option[Float] = ??? + + // todo: implement + override def hscan[A](key: Any, cursor: Int, pattern: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = ??? +} diff --git a/src/main/scala/com/redis/cluster/ListOps.scala b/src/main/scala/com/redis/cluster/ListOps.scala new file mode 100644 index 00000000..6fc4a94d --- /dev/null +++ b/src/main/scala/com/redis/cluster/ListOps.scala @@ -0,0 +1,57 @@ +package com.redis.cluster + +import com.redis.api.ListApi +import com.redis.serialization.{Format, Parse} + +trait ListOps extends ListApi { + rc: RedisClusterOps => + + override def lpush(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = + processForKey(key)(_.lpush(key, value, values: _*)) + + override def rpush(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = + processForKey(key)(_.rpush(key, value, values: _*)) + + override def llen(key: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.llen(key)) + + override def lrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = + processForKey(key)(_.lrange[A](key, start, end)) + + override def ltrim(key: Any, start: Int, end: Int)(implicit format: Format): Boolean = + processForKey(key)(_.ltrim(key, start, end)) + + override def lindex[A](key: Any, index: Int)(implicit format: Format, parse: Parse[A]): Option[A] = + processForKey(key)(_.lindex(key, index)) + + override def lset(key: Any, index: Int, value: Any)(implicit format: Format): Boolean = + processForKey(key)(_.lset(key, index, value)) + + override def lrem(key: Any, count: Int, value: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.lrem(key, count, value)) + + override def lpop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + processForKey(key)(_.lpop[A](key)) + + override def rpop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + processForKey(key)(_.rpop[A](key)) + + override def rpoplpush[A](srcKey: Any, dstKey: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + inSameNode(srcKey, dstKey) { n => n.rpoplpush[A](srcKey, dstKey) } + + override def brpoplpush[A](srcKey: Any, dstKey: Any, timeoutInSeconds: Int)(implicit format: Format, parse: Parse[A]): Option[A] = + inSameNode(srcKey, dstKey) { n => n.brpoplpush[A](srcKey, dstKey, timeoutInSeconds) } + + override def blpop[K, V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[(K, V)] = + inSameNode((key :: keys.toList): _*) { n => n.blpop[K, V](timeoutInSeconds, key, keys: _*) } + + override def brpop[K, V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]): Option[(K, V)] = + inSameNode((key :: keys.toList): _*) { n => n.brpop[K, V](timeoutInSeconds, key, keys: _*) } + + // todo: implement + override def lpushx(key: Any, value: 
Any)(implicit format: Format): Option[Long] = ??? + + // todo: implement + override def rpushx(key: Any, value: Any)(implicit format: Format): Option[Long] = ??? + +} diff --git a/src/main/scala/com/redis/cluster/NodeOps.scala b/src/main/scala/com/redis/cluster/NodeOps.scala new file mode 100644 index 00000000..65bb7a59 --- /dev/null +++ b/src/main/scala/com/redis/cluster/NodeOps.scala @@ -0,0 +1,31 @@ +package com.redis.cluster + +import com.redis.api.NodeApi + +trait NodeOps extends NodeApi { + rc: RedisClusterOps => + + override def save: Boolean = + onAllConns(_.save) forall (_ == true) + + override def bgsave: Boolean = + onAllConns(_.bgsave) forall (_ == true) + + override def shutdown: Boolean = + onAllConns(_.shutdown) forall (_ == true) + + override def bgrewriteaof: Boolean = + onAllConns(_.bgrewriteaof) forall (_ == true) + + // todo: implement + override def lastsave: Option[Long] = ??? + + // todo: implement + override def info: Option[String] = ??? + + // todo: implement + override def monitor: Boolean = ??? + + // todo: implement + override def slaveof(options: Any): Boolean = ??? +} diff --git a/src/main/scala/com/redis/cluster/RedisCluster.scala b/src/main/scala/com/redis/cluster/RedisCluster.scala index 416bc7f0..7b322250 100644 --- a/src/main/scala/com/redis/cluster/RedisCluster.scala +++ b/src/main/scala/com/redis/cluster/RedisCluster.scala @@ -19,11 +19,11 @@ import com.redis.serialization._ * redis-rb implements a regex based trick to achieve key-tagging. Here is the technique * explained in redis FAQ: * - * A key tag is a special pattern inside a key that, if preset, is the only part of the key - * hashed in order to select the server for this key. For example in order to hash the key - * "foo" I simply perform the CRC32 checksum of the whole string, but if this key has a - * pattern in the form of the characters {...} I only hash this substring. So for example - * for the key "foo{bared}" the key hashing code will simply perform the CRC32 of "bared". + * A key tag is a special pattern inside a key that, if preset, is the only part of the key + * hashed in order to select the server for this key. For example in order to hash the key + * "foo" I simply perform the CRC32 checksum of the whole string, but if this key has a + * pattern in the form of the characters {...} I only hash this substring. So for example + * for the key "foo{bared}" the key hashing code will simply perform the CRC32 of "bared". * This way using key tags you can ensure that related keys will be stored on the same Redis * instance just using the same key tag for all this keys. Redis-rb already implements key tags. * @@ -31,6 +31,7 @@ import com.redis.serialization._ trait KeyTag { def tag(key: Seq[Byte]): Option[Seq[Byte]] } + object RegexKeyTag extends KeyTag { val tagStart: Byte = '{'.toByte @@ -40,7 +41,7 @@ object RegexKeyTag extends KeyTag { val start = key.indexOf(tagStart) + 1 if (start > 0) { val end = key.indexOf(tagEnd, start) - if (end > -1) Some(key.slice(start,end)) else None + if (end > -1) Some(key.slice(start, end)) else None } else None } } @@ -53,22 +54,22 @@ object NoOpKeyTag extends KeyTag { * a level of abstraction for each node decoupling it from the address. A node is now identified * by a name, so functions like replaceServer works seamlessly. 
*/ -case class ClusterNode(nodename: String, host: String, port: Int, database: Int = 0, maxIdle: Int = 8, secret: Option[Any] = None, timeout : Int = 0){ +case class ClusterNode(nodename: String, host: String, port: Int, database: Int = 0, maxIdle: Int = 8, secret: Option[Any] = None, timeout: Int = 0) { override def toString: String = nodename } -abstract class RedisCluster(hosts: ClusterNode*) extends RedisCommand { - - // not needed at cluster level - override val host = null - override val port = 0 - override val timeout = 0 - - // abstract val - val keyTag: Option[KeyTag] - - // default in libmemcached - val POINTS_PER_SERVER = 160 // default in libmemcached +abstract class RedisCluster(hosts: ClusterNode*) + extends RedisClusterOps + with BaseOps + with NodeOps + with StringOps + with ListOps + with SetOps + with SortedSetOps + // with GeoOps todo: implement GeoApi + // with EvalOps todo: implement EvalApi + // with HyperLogLogOps todo: implement HyperLogLogApi + with HashOps { // instantiating a cluster will automatically connect participating nodes to the server val clients: List[IdentifiableRedisClientPool] = hosts.toList.map { h => @@ -78,290 +79,39 @@ abstract class RedisCluster(hosts: ClusterNode*) extends RedisCommand { // the hash ring will instantiate with the nodes up and added val hr: HashRing[IdentifiableRedisClientPool] = HashRing[IdentifiableRedisClientPool](clients, POINTS_PER_SERVER) - // get node for the key - def nodeForKey(key: Any)(implicit format: Format): IdentifiableRedisClientPool = { + override def nodeForKey(key: Any)(implicit format: Format): IdentifiableRedisClientPool = { val bKey = format(key) hr.getNode(keyTag.flatMap(_.tag(bKey.toIndexedSeq)).getOrElse(bKey.toIndexedSeq)) } - - def processForKey[T](key: Any)(body: RedisCommand => T)(implicit format: Format): T = { - nodeForKey(key).withClient(body(_)) - } - // add a server - def addServer(server: ClusterNode): Unit = { + override def addServer(server: ClusterNode): Unit = { hr addNode new IdentifiableRedisClientPool(server) } - /** - * Use Case: Suppose I have a big list of key/value pairs which are replicated in 2 Redis servers - - * one having test values for every key and the other having production values for the same set of - * keys. In a cluster using replaceServer I can switch between test mode and production mode - * without disturbing the hash ring. This gives an additional level of abstraction on the node name - * decoupling it from the node address. 
- */ - - // replace a server - // very useful when you want to replace a server in test mode to one in production mode - def replaceServer(server: ClusterNode): Unit = { + override def replaceServer(server: ClusterNode): Unit = { hr replaceNode new IdentifiableRedisClientPool(server) match { - case Some(clientPool) => clientPool.close - case None => + case Some(clientPool) => clientPool.close + case None => } } - - //remove a server - def removeServer(nodename: String): Unit ={ + + override def removeServer(nodename: String): Unit = { hr.cluster.find(_.node.nodename.equals(nodename)) match { case Some(pool) => { - hr removeNode(pool) + hr removeNode (pool) pool.close } case None => } } - - //list all running servers + def listServers: List[ClusterNode] = { hr.cluster.map(_.node).toList } - /** - * Operations - */ - override def keys[A](pattern: Any = "*")(implicit format: Format, parse: Parse[A]) = - Some(hr.cluster.toList.flatMap(_.withClient(_.keys[A](pattern))).flatten) - - def onAllConns[T](body: RedisClient => T) = + override def onAllConns[T](body: RedisClient => T): Iterable[T] = hr.cluster.map(p => p.withClient { client => body(client) }) // .forall(_ == true) - override def flushdb: Boolean = onAllConns(_.flushdb) forall(_ == true) - override def flushall: Boolean = onAllConns(_.flushall) forall(_ == true) - override def quit: Boolean = onAllConns(_.quit) forall(_ == true) - def close = hr.cluster.map(_.close) - - override def rename(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = processForKey(oldkey)(_.rename(oldkey, newkey)) - override def renamenx(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = processForKey(oldkey)(_.renamenx(oldkey, newkey)) - override def dbsize: Option[Long] = - Some(onAllConns(_.dbsize).foldLeft(0L)((a, b) => b.map(a+).getOrElse(a))) - override def exists(key: Any)(implicit format: Format): Boolean = processForKey(key)(_.exists(key)) - override def del(key: Any, keys: Any*)(implicit format: Format): Option[Long] = - Some((key :: keys.toList).groupBy(nodeForKey).foldLeft(0L) { case (t,(n,ks)) => n.withClient{ client => client.del(ks.head,ks.tail:_*).map(t+).getOrElse(t)} }) - override def getType(key: Any)(implicit format: Format): Option[String] = processForKey(key)(_.getType(key)) - override def expire(key: Any, expiry: Int)(implicit format: Format): Boolean = - processForKey(key)(_.expire(key, expiry)) - override def expireat(key: Any, expiry: Long)(implicit format: Format): Boolean = - processForKey(key)(_.expireat(key, expiry)) - override def pexpire(key: Any, expiry: Int)(implicit format: Format): Boolean = - processForKey(key)(_.pexpire(key, expiry)) - override def pexpireat(key: Any, expiry: Long)(implicit format: Format): Boolean = - processForKey(key)(_.pexpireat(key, expiry)) - override def select(index: Int) = throw new UnsupportedOperationException("not supported on a cluster") - override def ttl(key: Any)(implicit format: Format): Option[Long] = processForKey(key)(_.ttl(key)) - override def pttl(key: Any)(implicit format: Format): Option[Long] = processForKey(key)(_.pttl(key)) - override def randomkey[A](implicit parse: Parse[A]) = throw new UnsupportedOperationException("not supported on a cluster") - override def randkey[A](implicit parse: Parse[A]) = throw new UnsupportedOperationException("not supported on a cluster") - - - /** - * NodeOperations - */ - override def save: Boolean = onAllConns(_.save) forall(_ == true) - override def bgsave: Boolean = onAllConns(_.bgsave) forall(_ == true) - override def 
shutdown: Boolean = onAllConns(_.shutdown) forall(_ == true) - override def bgrewriteaof: Boolean = onAllConns(_.bgrewriteaof) forall(_ == true) - - override def lastsave = throw new UnsupportedOperationException("not supported on a cluster") - override def monitor = throw new UnsupportedOperationException("not supported on a cluster") - override def info = throw new UnsupportedOperationException("not supported on a cluster") - override def slaveof(options: Any) = throw new UnsupportedOperationException("not supported on a cluster") - override def move(key: Any, db: Int)(implicit format: Format) = throw new UnsupportedOperationException("not supported on a cluster") - override def auth(secret: Any)(implicit format: Format) = throw new UnsupportedOperationException("not supported on a cluster") - - - /** - * StringOperations - */ - override def set(key: Any, value: Any)(implicit format: Format) = processForKey(key)(_.set(key, value)) - override def set(key: Any, value: Any, onlyIfExists: Boolean, time: SecondsOrMillis) = processForKey(key)(_.set(key, value, onlyIfExists, time)) - override def get[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.get(key)) - override def getset[A](key: Any, value: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.getset(key, value)) - override def setnx(key: Any, value: Any)(implicit format: Format) = processForKey(key)(_.setnx(key, value)) - override def setex(key: Any, expiry: Long, value: Any)(implicit format: Format) = processForKey(key)(_.setex(key, expiry, value)) - override def incr(key: Any)(implicit format: Format) = processForKey(key)(_.incr(key)) - override def incrby(key: Any, increment: Long)(implicit format: Format) = processForKey(key)(_.incrby(key, increment)) - override def decr(key: Any)(implicit format: Format) = processForKey(key)(_.decr(key)) - override def decrby(key: Any, increment: Long)(implicit format: Format) = processForKey(key)(_.decrby(key, increment)) - - override def mget[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = { - val keylist = (key :: keys.toList) - val kvs = for { - (n, ks) <- keylist.groupBy(nodeForKey) - vs <- n.withClient(_.mget[A](ks.head, ks.tail: _*).toList) - kv <- (ks).zip(vs) - } yield kv - Some(keylist.map(kvs)) - } - - override def mset(kvs: (Any, Any)*)(implicit format: Format) = kvs.toList.map{ case (k, v) => set(k, v) }.forall(_ == true) - override def msetnx(kvs: (Any, Any)*)(implicit format: Format) = kvs.toList.map{ case (k, v) => setnx(k, v) }.forall(_ == true) - - override def setrange(key: Any, offset: Int, value: Any)(implicit format: Format) = processForKey(key)(_.setrange(key, offset, value)) - override def getrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.getrange(key, start, end)) - override def strlen(key: Any)(implicit format: Format) = processForKey(key)(_.strlen(key)) - override def append(key: Any, value: Any)(implicit format: Format) = processForKey(key)(_.append(key, value)) - override def getbit(key: Any, offset: Int)(implicit format: Format) = processForKey(key)(_.getbit(key, offset)) - override def setbit(key: Any, offset: Int, value: Any)(implicit format: Format) = processForKey(key)(_.setbit(key, offset, value)) - override def bitop(op: String, destKey: Any, srcKeys: Any*)(implicit format: Format) = throw new UnsupportedOperationException("not supported on a cluster") - override def bitcount(key: Any, range: Option[(Int, Int)] = 
None)(implicit format: Format) = processForKey(key)(_.bitcount(key, range)) - - /** - * ListOperations - */ - override def lpush(key: Any, value: Any, values: Any*)(implicit format: Format) = processForKey(key)(_.lpush(key, value, values:_*)) - override def rpush(key: Any, value: Any, values: Any*)(implicit format: Format) = processForKey(key)(_.rpush(key, value, values:_*)) - override def llen(key: Any)(implicit format: Format) = processForKey(key)(_.llen(key)) - override def lrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.lrange[A](key, start, end)) - override def ltrim(key: Any, start: Int, end: Int)(implicit format: Format) = processForKey(key)(_.ltrim(key, start, end)) - override def lindex[A](key: Any, index: Int)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.lindex(key, index)) - override def lset(key: Any, index: Int, value: Any)(implicit format: Format) = processForKey(key)(_.lset(key, index, value)) - override def lrem(key: Any, count: Int, value: Any)(implicit format: Format) = processForKey(key)(_.lrem(key, count, value)) - override def lpop[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.lpop[A](key)) - override def rpop[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.rpop[A](key)) - override def rpoplpush[A](srcKey: Any, dstKey: Any)(implicit format: Format, parse: Parse[A]) = - inSameNode(srcKey, dstKey) {n => n.rpoplpush[A](srcKey, dstKey)} - override def brpoplpush[A](srcKey: Any, dstKey: Any, timeoutInSeconds: Int)(implicit format: Format, parse: Parse[A]) = - inSameNode(srcKey, dstKey) {n => n.brpoplpush[A](srcKey, dstKey, timeoutInSeconds)} - override def blpop[K,V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]) = - inSameNode((key :: keys.toList): _*) {n => n.blpop[K, V](timeoutInSeconds, key, keys:_*)} - override def brpop[K,V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]) = - inSameNode((key :: keys.toList): _*) {n => n.brpop[K, V](timeoutInSeconds, key, keys:_*)} - - private def inSameNode[T](keys: Any*)(body: RedisClient => T)(implicit format: Format): T = { - val nodes = keys.toList.map(nodeForKey(_)) - if (nodes.forall(_ == nodes.head)) { - nodes.head.withClient(body(_)) - } else { - throw new UnsupportedOperationException("can only occur if all keys map to same node") - } - } - - /** - * SetOperations - */ - override def sadd(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = - processForKey(key)(_.sadd(key, value, values:_*)) - override def srem(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = - processForKey(key)(_.srem(key, value, values:_*)) - override def spop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = - processForKey(key)(_.spop[A](key)) - - override def smove(sourceKey: Any, destKey: Any, value: Any)(implicit format: Format): Option[Long] = - inSameNode(sourceKey, destKey) {n => n.smove(sourceKey, destKey, value)} - - override def scard(key: Any)(implicit format: Format): Option[Long] = processForKey(key)(_.scard(key)) - override def sismember(key: Any, value: Any)(implicit format: Format): Boolean = - processForKey(key)(_.sismember(key, value)) - - override def sinter[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = - inSameNode((key :: keys.toList): _*) {n => n.sinter[A](key, keys: _*)} - 
- override def sinterstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = - inSameNode((key :: keys.toList): _*) {n => n.sinterstore(key, keys: _*)} - - override def sunion[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = - inSameNode((key :: keys.toList): _*) {n => n.sunion[A](key, keys: _*)} - - override def sunionstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = - inSameNode((key :: keys.toList): _*) {n => n.sunionstore(key, keys: _*)} - - override def sdiff[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = - inSameNode((key :: keys.toList): _*) {n => n.sdiff[A](key, keys: _*)} - - override def sdiffstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = - inSameNode((key :: keys.toList): _*) {n => n.sdiffstore(key, keys: _*)} - - override def smembers[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = - processForKey(key)(_.smembers(key)) - override def srandmember[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = - processForKey(key)(_.srandmember(key)) - override def srandmember[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = - processForKey(key)(_.srandmember(key, count)) - - - import com.redis.RedisClient._ - - /** - * SortedSetOperations - */ - override def zadd(key: Any, score: Double, member: Any, scoreVals: (Double, Any)*)(implicit format: Format) = - processForKey(key)(_.zadd(key, score, member, scoreVals:_*)) - override def zrem(key: Any, member: Any, members: Any*)(implicit format: Format): Option[Long] = - processForKey(key)(_.zrem(key, member, members: _*)) - override def zincrby(key: Any, incr: Double, member: Any)(implicit format: Format) = processForKey(key)(_.zincrby(key, incr, member)) - override def zcard(key: Any)(implicit format: Format) = processForKey(key)(_.zcard(key)) - override def zscore(key: Any, element: Any)(implicit format: Format) = processForKey(key)(_.zscore(key, element)) - override def zrange[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder )(implicit format: Format, parse: Parse[A]) = - processForKey(key)(_.zrange[A](key, start, end, sortAs)) - override def zrangeWithScore[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]) = - processForKey(key)(_.zrangeWithScore[A](key, start, end, sortAs)) - - override def zrangebyscore[A](key: Any, min: Double = Double.NegativeInfinity, - minInclusive: Boolean = true, max: Double = Double.PositiveInfinity, - maxInclusive: Boolean = true, limit: Option[(Int, Int)], - sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]) = - processForKey(key)(_.zrangebyscore[A](key, min, minInclusive, max, maxInclusive, limit, sortAs)) - - override def zrangebyscoreWithScore[A](key: Any, min: Double = Double.NegativeInfinity, - minInclusive: Boolean = true, max: Double = Double.PositiveInfinity, - maxInclusive: Boolean = true, limit: Option[(Int, Int)], - sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]) = - processForKey(key)(_.zrangebyscoreWithScore[A](key, min, minInclusive, max, maxInclusive, limit, sortAs)) - - override def zcount(key: Any, min: Double = Double.NegativeInfinity, max: Double = Double.PositiveInfinity, - minInclusive: Boolean = true, maxInclusive: Boolean = true)(implicit format: Format): Option[Long] = - processForKey(key)(_.zcount(key, min, max, minInclusive, maxInclusive)) - - 
override def zrank(key: Any, member: Any, reverse: Boolean = false)(implicit format: Format) = processForKey(key)(_.zrank(key, member, reverse)) - override def zremrangebyrank(key: Any, start: Int = 0, end: Int = -1)(implicit format: Format) = processForKey(key)(_.zremrangebyrank(key, start, end)) - override def zremrangebyscore(key: Any, start: Double = Double.NegativeInfinity, - end: Double = Double.PositiveInfinity)(implicit format: Format) = processForKey(key)(_.zremrangebyscore(key, start, end)) - - override def zunionstore(dstKey: Any, keys: Iterable[Any], - aggregate: Aggregate = SUM)(implicit format: Format) = inSameNode((dstKey :: keys.toList):_*) {n => - n.zunionstore(dstKey, keys, aggregate) - } - - override def zunionstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any,Double]], - aggregate: Aggregate = SUM)(implicit format: Format) = inSameNode((dstKey :: kws.map(_._1).toList):_*) {n => - n.zunionstoreWeighted(dstKey, kws, aggregate) - } - - override def zinterstore(dstKey: Any, keys: Iterable[Any], - aggregate: Aggregate = SUM)(implicit format: Format) = inSameNode((dstKey :: keys.toList):_*) {n => - n.zinterstore(dstKey, keys, aggregate) - } - - override def zinterstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any,Double]], - aggregate: Aggregate = SUM)(implicit format: Format) = inSameNode((dstKey :: kws.map(_._1).toList):_*) {n => - n.zinterstoreWeighted(dstKey, kws, aggregate) - } - + def close(): Unit = hr.cluster.map(_.close) - /** - * HashOperations - */ - override def hset(key: Any, field: Any, value: Any)(implicit format: Format) = processForKey(key)(_.hset(key, field, value)) - override def hset1(key: Any, field: Any, value: Any)(implicit format: Format) = processForKey(key)(_.hset1(key, field, value)) - override def hget[A](key: Any, field: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.hget[A](key, field)) - override def hmset(key: Any, map: Iterable[Product2[Any, Any]])(implicit format: Format) = processForKey(key)(_.hmset(key, map)) - override def hmget[K,V](key: Any, fields: K*)(implicit format: Format, parseV: Parse[V]) = processForKey(key)(_.hmget[K,V](key, fields:_*)) - override def hincrby(key: Any, field: Any, value: Long)(implicit format: Format) = processForKey(key)(_.hincrby(key, field, value)) - override def hexists(key: Any, field: Any)(implicit format: Format) = processForKey(key)(_.hexists(key, field)) - override def hdel(key: Any, field: Any, fields: Any*)(implicit format: Format): Option[Long] = processForKey(key)(_.hdel(key, field, fields:_*)) - override def hlen(key: Any)(implicit format: Format) = processForKey(key)(_.hlen(key)) - override def hkeys[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.hkeys[A](key)) - override def hvals[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.hvals[A](key)) - override def hgetall[K,V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]) = processForKey(key)(_.hgetall[K,V](key)) - override def hgetall1[K,V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]) = processForKey(key)(_.hgetall1[K,V](key)) } diff --git a/src/main/scala/com/redis/cluster/RedisClusterOps.scala b/src/main/scala/com/redis/cluster/RedisClusterOps.scala new file mode 100644 index 00000000..9e7d0753 --- /dev/null +++ b/src/main/scala/com/redis/cluster/RedisClusterOps.scala @@ -0,0 +1,60 @@ +package com.redis.cluster + +import com.redis.serialization.Format +import com.redis.{RedisClient, RedisClientPool, 
RedisCommand} + +trait RedisClusterOps extends AutoCloseable { + + val keyTag: Option[KeyTag] + + protected val POINTS_PER_SERVER = 160 // default in libmemcached + + /** + * get node for the key + */ + protected def nodeForKey(key: Any)(implicit format: Format): RedisClientPool + + protected def onAllConns[T](body: RedisClient => T): Iterable[T] + + /** + * add server to internal pool + */ + def addServer(server: ClusterNode): Unit + + /** + * replace a server + * useful when you want to replace a server running in test mode with one running in production mode + * + * Use case: suppose a large set of key/value pairs is replicated on two Redis servers - + * one holding test values for every key and the other holding production values for the same + * keys. With replaceServer the cluster can switch between test mode and production mode + * without disturbing the hash ring. The node name thus becomes an extra level of abstraction, + * decoupled from the node address. + */ + def replaceServer(server: ClusterNode): Unit + + /** + * remove a server + */ + def removeServer(nodename: String): Unit + + /** + * list all running servers + */ + def listServers: List[ClusterNode] + + def processForKey[T](key: Any)(body: RedisCommand => T)(implicit format: Format): T = { + nodeForKey(key).withClient(body(_)) + } + + def inSameNode[T](keys: Any*)(body: RedisClient => T)(implicit format: Format): T = { + val nodes = keys.toList.map(nodeForKey(_)) + if (nodes.forall(_ == nodes.head)) { + nodes.head.withClient(body(_)) + } else { + throw new UnsupportedOperationException("can only occur if all keys map to same node") + } + } + + +} diff --git a/src/main/scala/com/redis/cluster/RedisShards.scala b/src/main/scala/com/redis/cluster/RedisShards.scala index e1fc215f..50d30e22 100644 --- a/src/main/scala/com/redis/cluster/RedisShards.scala +++ b/src/main/scala/com/redis/cluster/RedisShards.scala @@ -3,18 +3,19 @@ package com.redis.cluster import com.redis._ import com.redis.serialization._ -abstract class RedisShards(val hosts: List[ClusterNode]) extends RedisCommand { +abstract class RedisShards(val hosts: List[ClusterNode]) + extends RedisClusterOps + with BaseOps + with NodeOps + with StringOps + with ListOps + with SetOps + with SortedSetOps + // with GeoOps todo: implement GeoApi + // with EvalOps todo: implement EvalApi + // with HyperLogLogOps todo: implement HyperLogLogApi + with HashOps { - // not needed at cluster level - override val host = null - override val port = 0 - override val timeout = 0 - - // abstract val - val keyTag: Option[KeyTag] - - // default in libmemcached - val POINTS_PER_SERVER = 160 // default in libmemcached // instantiating a cluster will automatically connect participating nodes to the server private var clients = hosts.map { h => (h.nodename, new IdentifiableRedisClientPool(h)) } toMap @@ -22,283 +23,41 @@ abstract class RedisShards(val hosts: List[ClusterNode]) extends RedisCommand { // the hash ring will instantiate with the nodes up and added val hr: HashRing[String] = HashRing[String](hosts.map(_.nodename), POINTS_PER_SERVER) - // get node for the key - def nodeForKey(key: Any)(implicit format: Format): RedisClientPool = { + override def nodeForKey(key: Any)(implicit format: Format): RedisClientPool = { val bKey = format(key) val selectedNode = hr.getNode(keyTag.flatMap(_.tag(bKey.toIndexedSeq)).getOrElse(bKey.toIndexedSeq)) clients(selectedNode) } - - def processForKey[T](key: Any)(body: RedisCommand => T)(implicit format: Format): T = { -
nodeForKey(key).withClient(body(_)) - } - // add a server - def addServer(server: ClusterNode): Unit = { + override def addServer(server: ClusterNode): Unit = { clients = clients + (server.nodename -> new IdentifiableRedisClientPool(server)) hr addNode server.nodename } - // replace a server - def replaceServer(server: ClusterNode): Unit = { + override def replaceServer(server: ClusterNode): Unit = { if (clients.contains(server.nodename)) { clients(server.nodename).close clients = clients - server.nodename } clients = clients + (server.nodename -> new IdentifiableRedisClientPool(server)) } - - //remove a server - def removeServer(nodename: String): Unit ={ + + override def removeServer(nodename: String): Unit = { if (clients.contains(nodename)) { val pool = clients(nodename) pool.close clients = clients - nodename - hr removeNode(nodename) - } + hr removeNode (nodename) + } } - - //list all running servers + def listServers: List[ClusterNode] = { clients.values.map(_.node).toList } - /** - * Operations - */ - override def keys[A](pattern: Any = "*")(implicit format: Format, parse: Parse[A]) = - Some(clients.values.toList.flatMap(_.withClient(_.keys[A](pattern))).flatten) - - def onAllConns[T](body: RedisClient => T) = + override def onAllConns[T](body: RedisClient => T): Iterable[T] = clients.values.map(p => p.withClient { client => body(client) }) // .forall(_ == true) - override def flushdb: Boolean = onAllConns(_.flushdb) forall(_ == true) - override def flushall: Boolean = onAllConns(_.flushall) forall(_ == true) - override def quit: Boolean = onAllConns(_.quit) forall(_ == true) - def close = clients.values.map(_.close) - - override def rename(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = processForKey(oldkey)(_.rename(oldkey, newkey)) - override def renamenx(oldkey: Any, newkey: Any)(implicit format: Format): Boolean = processForKey(oldkey)(_.renamenx(oldkey, newkey)) - override def dbsize: Option[Long] = - Some(onAllConns(_.dbsize).foldLeft(0L)((a, b) => b.map(a+).getOrElse(a))) - override def exists(key: Any)(implicit format: Format): Boolean = processForKey(key)(_.exists(key)) - override def del(key: Any, keys: Any*)(implicit format: Format): Option[Long] = - Some((key :: keys.toList).groupBy(nodeForKey).foldLeft(0L) { case (t,(n,ks)) => n.withClient{ client => client.del(ks.head,ks.tail:_*).map(t+).getOrElse(t)} }) - override def getType(key: Any)(implicit format: Format): Option[String] = processForKey(key)(_.getType(key)) - override def expire(key: Any, expiry: Int)(implicit format: Format): Boolean = - processForKey(key)(_.expire(key, expiry)) - override def expireat(key: Any, expiry: Long)(implicit format: Format): Boolean = - processForKey(key)(_.expireat(key, expiry)) - override def pexpire(key: Any, expiry: Int)(implicit format: Format): Boolean = - processForKey(key)(_.pexpire(key, expiry)) - override def pexpireat(key: Any, expiry: Long)(implicit format: Format): Boolean = - processForKey(key)(_.pexpireat(key, expiry)) - override def select(index: Int) = throw new UnsupportedOperationException("not supported on a cluster") - override def ttl(key: Any)(implicit format: Format): Option[Long] = processForKey(key)(_.ttl(key)) - override def pttl(key: Any)(implicit format: Format): Option[Long] = processForKey(key)(_.pttl(key)) - override def randomkey[A](implicit parse: Parse[A]) = throw new UnsupportedOperationException("not supported on a cluster") - override def randkey[A](implicit parse: Parse[A]) = throw new UnsupportedOperationException("not supported 
on a cluster") - - - /** - * NodeOperations - */ - override def save: Boolean = onAllConns(_.save) forall(_ == true) - override def bgsave: Boolean = onAllConns(_.bgsave) forall(_ == true) - override def shutdown: Boolean = onAllConns(_.shutdown) forall(_ == true) - override def bgrewriteaof: Boolean = onAllConns(_.bgrewriteaof) forall(_ == true) - - override def lastsave = throw new UnsupportedOperationException("not supported on a cluster") - override def monitor = throw new UnsupportedOperationException("not supported on a cluster") - override def info = throw new UnsupportedOperationException("not supported on a cluster") - override def slaveof(options: Any) = throw new UnsupportedOperationException("not supported on a cluster") - override def move(key: Any, db: Int)(implicit format: Format) = throw new UnsupportedOperationException("not supported on a cluster") - override def auth(secret: Any)(implicit format: Format) = throw new UnsupportedOperationException("not supported on a cluster") - - - /** - * StringOperations - */ - override def set(key: Any, value: Any)(implicit format: Format) = processForKey(key)(_.set(key, value)) - override def set(key: Any, value: Any, onlyIfExists: Boolean, time: SecondsOrMillis) = processForKey(key)(_.set(key, value, onlyIfExists, time)) - override def get[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.get(key)) - override def getset[A](key: Any, value: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.getset(key, value)) - override def setnx(key: Any, value: Any)(implicit format: Format) = processForKey(key)(_.setnx(key, value)) - override def setex(key: Any, expiry: Long, value: Any)(implicit format: Format) = processForKey(key)(_.setex(key, expiry, value)) - override def incr(key: Any)(implicit format: Format) = processForKey(key)(_.incr(key)) - override def incrby(key: Any, increment: Long)(implicit format: Format) = processForKey(key)(_.incrby(key, increment)) - override def decr(key: Any)(implicit format: Format) = processForKey(key)(_.decr(key)) - override def decrby(key: Any, increment: Long)(implicit format: Format) = processForKey(key)(_.decrby(key, increment)) - - override def mget[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = { - val keylist = (key :: keys.toList) - val kvs = for { - (n, ks) <- keylist.groupBy(nodeForKey) - vs <- n.withClient(_.mget[A](ks.head, ks.tail: _*).toList) - kv <- (ks).zip(vs) - } yield kv - Some(keylist.map(kvs)) - } - - override def mset(kvs: (Any, Any)*)(implicit format: Format) = kvs.toList.map{ case (k, v) => set(k, v) }.forall(_ == true) - override def msetnx(kvs: (Any, Any)*)(implicit format: Format) = kvs.toList.map{ case (k, v) => setnx(k, v) }.forall(_ == true) - - override def setrange(key: Any, offset: Int, value: Any)(implicit format: Format) = processForKey(key)(_.setrange(key, offset, value)) - override def getrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.getrange(key, start, end)) - override def strlen(key: Any)(implicit format: Format) = processForKey(key)(_.strlen(key)) - override def append(key: Any, value: Any)(implicit format: Format) = processForKey(key)(_.append(key, value)) - override def getbit(key: Any, offset: Int)(implicit format: Format) = processForKey(key)(_.getbit(key, offset)) - override def setbit(key: Any, offset: Int, value: Any)(implicit format: Format) = processForKey(key)(_.setbit(key, offset, value)) - override def 
bitop(op: String, destKey: Any, srcKeys: Any*)(implicit format: Format) = throw new UnsupportedOperationException("not supported on a cluster") - override def bitcount(key: Any, range: Option[(Int, Int)] = None)(implicit format: Format) = processForKey(key)(_.bitcount(key, range)) - - /** - * ListOperations - */ - override def lpush(key: Any, value: Any, values: Any*)(implicit format: Format) = processForKey(key)(_.lpush(key, value, values:_*)) - override def rpush(key: Any, value: Any, values: Any*)(implicit format: Format) = processForKey(key)(_.rpush(key, value, values:_*)) - override def llen(key: Any)(implicit format: Format) = processForKey(key)(_.llen(key)) - override def lrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.lrange[A](key, start, end)) - override def ltrim(key: Any, start: Int, end: Int)(implicit format: Format) = processForKey(key)(_.ltrim(key, start, end)) - override def lindex[A](key: Any, index: Int)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.lindex(key, index)) - override def lset(key: Any, index: Int, value: Any)(implicit format: Format) = processForKey(key)(_.lset(key, index, value)) - override def lrem(key: Any, count: Int, value: Any)(implicit format: Format) = processForKey(key)(_.lrem(key, count, value)) - override def lpop[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.lpop[A](key)) - override def rpop[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.rpop[A](key)) - override def rpoplpush[A](srcKey: Any, dstKey: Any)(implicit format: Format, parse: Parse[A]) = - inSameNode(srcKey, dstKey) {n => n.rpoplpush[A](srcKey, dstKey)} - override def brpoplpush[A](srcKey: Any, dstKey: Any, timeoutInSeconds: Int)(implicit format: Format, parse: Parse[A]) = - inSameNode(srcKey, dstKey) {n => n.brpoplpush[A](srcKey, dstKey, timeoutInSeconds)} - override def blpop[K,V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]) = - inSameNode((key :: keys.toList): _*) {n => n.blpop[K, V](timeoutInSeconds, key, keys:_*)} - override def brpop[K,V](timeoutInSeconds: Int, key: K, keys: K*)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]) = - inSameNode((key :: keys.toList): _*) {n => n.brpop[K, V](timeoutInSeconds, key, keys:_*)} - - private def inSameNode[T](keys: Any*)(body: RedisClient => T)(implicit format: Format): T = { - val nodes = keys.toList.map(nodeForKey(_)) - nodes.forall(_ == nodes.head) match { - case true => nodes.head.withClient(body(_)) // all nodes equal - case _ => - throw new UnsupportedOperationException("can only occur if all keys map to same node") - } - } - - /** - * SetOperations - */ - override def sadd(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = - processForKey(key)(_.sadd(key, value, values:_*)) - override def srem(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = - processForKey(key)(_.srem(key, value, values:_*)) - override def spop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = - processForKey(key)(_.spop[A](key)) - - override def smove(sourceKey: Any, destKey: Any, value: Any)(implicit format: Format): Option[Long] = - inSameNode(sourceKey, destKey) {n => n.smove(sourceKey, destKey, value)} - - override def scard(key: Any)(implicit format: Format): Option[Long] = processForKey(key)(_.scard(key)) - override def sismember(key: Any, value: Any)(implicit format: Format): Boolean 
= - processForKey(key)(_.sismember(key, value)) - - override def sinter[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = - inSameNode((key :: keys.toList): _*) {n => n.sinter[A](key, keys: _*)} - - override def sinterstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = - inSameNode((key :: keys.toList): _*) {n => n.sinterstore(key, keys: _*)} - - override def sunion[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = - inSameNode((key :: keys.toList): _*) {n => n.sunion[A](key, keys: _*)} - - override def sunionstore(key: Any, keys: Any*)(implicit format: Format) = - inSameNode((key :: keys.toList): _*) {n => n.sunionstore(key, keys: _*)} - - override def sdiff[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = - inSameNode((key :: keys.toList): _*) {n => n.sdiff[A](key, keys: _*)} - - override def sdiffstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = - inSameNode((key :: keys.toList): _*) {n => n.sdiffstore(key, keys: _*)} - - override def smembers[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = - processForKey(key)(_.smembers(key)) - override def srandmember[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = - processForKey(key)(_.srandmember(key)) - override def srandmember[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = - processForKey(key)(_.srandmember(key, count)) - - - import com.redis.RedisClient._ - - /** - * SortedSetOperations - */ - override def zadd(key: Any, score: Double, member: Any, scoreVals: (Double, Any)*)(implicit format: Format) = - processForKey(key)(_.zadd(key, score, member, scoreVals:_*)) - override def zrem(key: Any, member: Any, members: Any*)(implicit format: Format): Option[Long] = - processForKey(key)(_.zrem(key, member, members: _*)) - override def zincrby(key: Any, incr: Double, member: Any)(implicit format: Format) = processForKey(key)(_.zincrby(key, incr, member)) - override def zcard(key: Any)(implicit format: Format) = processForKey(key)(_.zcard(key)) - override def zscore(key: Any, element: Any)(implicit format: Format) = processForKey(key)(_.zscore(key, element)) - override def zrange[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder )(implicit format: Format, parse: Parse[A]) = - processForKey(key)(_.zrange[A](key, start, end, sortAs)) - override def zrangeWithScore[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]) = - processForKey(key)(_.zrangeWithScore[A](key, start, end, sortAs)) - - override def zrangebyscore[A](key: Any, min: Double = Double.NegativeInfinity, - minInclusive: Boolean = true, max: Double = Double.PositiveInfinity, - maxInclusive: Boolean = true, limit: Option[(Int, Int)], - sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]) = - processForKey(key)(_.zrangebyscore[A](key, min, minInclusive, max, maxInclusive, limit, sortAs)) - - override def zrangebyscoreWithScore[A](key: Any, min: Double = Double.NegativeInfinity, - minInclusive: Boolean = true, max: Double = Double.PositiveInfinity, - maxInclusive: Boolean = true, limit: Option[(Int, Int)], - sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]) = - processForKey(key)(_.zrangebyscoreWithScore[A](key, min, minInclusive, max, maxInclusive, limit, sortAs)) - - override def zcount(key: Any, min: Double = Double.NegativeInfinity, 
max: Double = Double.PositiveInfinity, - minInclusive: Boolean = true, maxInclusive: Boolean = true)(implicit format: Format): Option[Long] = - processForKey(key)(_.zcount(key, min, max, minInclusive, maxInclusive)) - - override def zrank(key: Any, member: Any, reverse: Boolean = false)(implicit format: Format) = processForKey(key)(_.zrank(key, member, reverse)) - override def zremrangebyrank(key: Any, start: Int = 0, end: Int = -1)(implicit format: Format) = processForKey(key)(_.zremrangebyrank(key, start, end)) - override def zremrangebyscore(key: Any, start: Double = Double.NegativeInfinity, - end: Double = Double.PositiveInfinity)(implicit format: Format) = processForKey(key)(_.zremrangebyscore(key, start, end)) - - override def zunionstore(dstKey: Any, keys: Iterable[Any], - aggregate: Aggregate = SUM)(implicit format: Format) = inSameNode((dstKey :: keys.toList):_*) {n => - n.zunionstore(dstKey, keys, aggregate) - } - - override def zunionstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any,Double]], - aggregate: Aggregate = SUM)(implicit format: Format) = inSameNode((dstKey :: kws.map(_._1).toList):_*) {n => - n.zunionstoreWeighted(dstKey, kws, aggregate) - } - - override def zinterstore(dstKey: Any, keys: Iterable[Any], - aggregate: Aggregate = SUM)(implicit format: Format) = inSameNode((dstKey :: keys.toList):_*) {n => - n.zinterstore(dstKey, keys, aggregate) - } - - override def zinterstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any,Double]], - aggregate: Aggregate = SUM)(implicit format: Format) = inSameNode((dstKey :: kws.map(_._1).toList):_*) {n => - n.zinterstoreWeighted(dstKey, kws, aggregate) - } - + def close(): Unit = clients.values.map(_.close) - /** - * HashOperations - */ - override def hset(key: Any, field: Any, value: Any)(implicit format: Format) = processForKey(key)(_.hset(key, field, value)) - override def hset1(key: Any, field: Any, value: Any)(implicit format: Format) = processForKey(key)(_.hset1(key, field, value)) - override def hget[A](key: Any, field: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.hget[A](key, field)) - override def hmset(key: Any, map: Iterable[Product2[Any, Any]])(implicit format: Format) = processForKey(key)(_.hmset(key, map)) - override def hmget[K,V](key: Any, fields: K*)(implicit format: Format, parseV: Parse[V]) = processForKey(key)(_.hmget[K,V](key, fields:_*)) - override def hincrby(key: Any, field: Any, value: Long)(implicit format: Format) = processForKey(key)(_.hincrby(key, field, value)) - override def hexists(key: Any, field: Any)(implicit format: Format) = processForKey(key)(_.hexists(key, field)) - override def hdel(key: Any, field: Any, fields: Any*)(implicit format: Format): Option[Long] = processForKey(key)(_.hdel(key, field, fields:_*)) - override def hlen(key: Any)(implicit format: Format) = processForKey(key)(_.hlen(key)) - override def hkeys[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.hkeys[A](key)) - override def hvals[A](key: Any)(implicit format: Format, parse: Parse[A]) = processForKey(key)(_.hvals[A](key)) - override def hgetall[K,V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]) = processForKey(key)(_.hgetall[K,V](key)) - override def hgetall1[K,V](key: Any)(implicit format: Format, parseK: Parse[K], parseV: Parse[V]) = processForKey(key)(_.hgetall1[K,V](key)) } diff --git a/src/main/scala/com/redis/cluster/SetOps.scala b/src/main/scala/com/redis/cluster/SetOps.scala new file mode 100644 index 00000000..c3706639 --- /dev/null 
+++ b/src/main/scala/com/redis/cluster/SetOps.scala @@ -0,0 +1,58 @@ +package com.redis.cluster + +import com.redis.api.SetApi +import com.redis.serialization.{Format, Parse} + +trait SetOps extends SetApi { + rc: RedisClusterOps => + + override def sadd(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = + processForKey(key)(_.sadd(key, value, values: _*)) + + override def srem(key: Any, value: Any, values: Any*)(implicit format: Format): Option[Long] = + processForKey(key)(_.srem(key, value, values: _*)) + + override def spop[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + processForKey(key)(_.spop[A](key)) + + override def smove(sourceKey: Any, destKey: Any, value: Any)(implicit format: Format): Option[Long] = + inSameNode(sourceKey, destKey) { n => n.smove(sourceKey, destKey, value) } + + override def scard(key: Any)(implicit format: Format): Option[Long] = processForKey(key)(_.scard(key)) + + override def sismember(key: Any, value: Any)(implicit format: Format): Boolean = + processForKey(key)(_.sismember(key, value)) + + override def sinter[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = + inSameNode((key :: keys.toList): _*) { n => n.sinter[A](key, keys: _*) } + + override def sinterstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = + inSameNode((key :: keys.toList): _*) { n => n.sinterstore(key, keys: _*) } + + override def sunion[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = + inSameNode((key :: keys.toList): _*) { n => n.sunion[A](key, keys: _*) } + + override def sunionstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = + inSameNode((key :: keys.toList): _*) { n => n.sunionstore(key, keys: _*) } + + override def sdiff[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = + inSameNode((key :: keys.toList): _*) { n => n.sdiff[A](key, keys: _*) } + + override def sdiffstore(key: Any, keys: Any*)(implicit format: Format): Option[Long] = + inSameNode((key :: keys.toList): _*) { n => n.sdiffstore(key, keys: _*) } + + override def smembers[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = + processForKey(key)(_.smembers(key)) + + override def srandmember[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + processForKey(key)(_.srandmember(key)) + + override def srandmember[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = + processForKey(key)(_.srandmember(key, count)) + + // todo: implement + override def spop[A](key: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[Set[Option[A]]] = ??? + + // todo: implement + override def sscan[A](key: Any, cursor: Int, pattern: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = ??? 
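+ + // note: spop with count and sscan operate on a single key, so once implemented they could + // presumably be routed to the owning node via processForKey, like the other single-key commands above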
+} diff --git a/src/main/scala/com/redis/cluster/SortedSetOps.scala b/src/main/scala/com/redis/cluster/SortedSetOps.scala new file mode 100644 index 00000000..6b07b7e4 --- /dev/null +++ b/src/main/scala/com/redis/cluster/SortedSetOps.scala @@ -0,0 +1,86 @@ +package com.redis.cluster + +import com.redis.RedisClient.{ASC, Aggregate, SUM, SortOrder} +import com.redis.api.SortedSetApi +import com.redis.serialization.{Format, Parse} + +trait SortedSetOps extends SortedSetApi { + rc: RedisClusterOps => + + override def zadd(key: Any, score: Double, member: Any, scoreVals: (Double, Any)*)(implicit format: Format): Option[Long] = + processForKey(key)(_.zadd(key, score, member, scoreVals: _*)) + + override def zrem(key: Any, member: Any, members: Any*)(implicit format: Format): Option[Long] = + processForKey(key)(_.zrem(key, member, members: _*)) + + override def zincrby(key: Any, incr: Double, member: Any)(implicit format: Format): Option[Double] = + processForKey(key)(_.zincrby(key, incr, member)) + + override def zcard(key: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.zcard(key)) + + override def zscore(key: Any, element: Any)(implicit format: Format): Option[Double] = + processForKey(key)(_.zscore(key, element)) + + override def zrange[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder)(implicit format: Format, parse: Parse[A]): Option[List[A]] = + processForKey(key)(_.zrange[A](key, start, end, sortAs)) + + override def zrangeWithScore[A](key: Any, start: Int = 0, end: Int = -1, sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[(A, Double)]] = + processForKey(key)(_.zrangeWithScore[A](key, start, end, sortAs)) + + override def zrangebyscore[A](key: Any, min: Double = Double.NegativeInfinity, + minInclusive: Boolean = true, max: Double = Double.PositiveInfinity, + maxInclusive: Boolean = true, limit: Option[(Int, Int)], + sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[A]] = + processForKey(key)(_.zrangebyscore[A](key, min, minInclusive, max, maxInclusive, limit, sortAs)) + + override def zrangebyscoreWithScore[A](key: Any, min: Double = Double.NegativeInfinity, + minInclusive: Boolean = true, max: Double = Double.PositiveInfinity, + maxInclusive: Boolean = true, limit: Option[(Int, Int)], + sortAs: SortOrder = ASC)(implicit format: Format, parse: Parse[A]): Option[List[(A, Double)]] = + processForKey(key)(_.zrangebyscoreWithScore[A](key, min, minInclusive, max, maxInclusive, limit, sortAs)) + + override def zcount(key: Any, min: Double = Double.NegativeInfinity, max: Double = Double.PositiveInfinity, + minInclusive: Boolean = true, maxInclusive: Boolean = true)(implicit format: Format): Option[Long] = + processForKey(key)(_.zcount(key, min, max, minInclusive, maxInclusive)) + + override def zrank(key: Any, member: Any, reverse: Boolean = false)(implicit format: Format): Option[Long] = + processForKey(key)(_.zrank(key, member, reverse)) + + override def zremrangebyrank(key: Any, start: Int = 0, end: Int = -1)(implicit format: Format): Option[Long] = + processForKey(key)(_.zremrangebyrank(key, start, end)) + + override def zremrangebyscore(key: Any, start: Double = Double.NegativeInfinity, + end: Double = Double.PositiveInfinity)(implicit format: Format): Option[Long] = + processForKey(key)(_.zremrangebyscore(key, start, end)) + + override def zunionstore(dstKey: Any, keys: Iterable[Any], + aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = + inSameNode((dstKey :: keys.toList): _*) 
{ n => + n.zunionstore(dstKey, keys, aggregate) + } + + override def zunionstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any, Double]], + aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = + inSameNode((dstKey :: kws.map(_._1).toList): _*) { n => + n.zunionstoreWeighted(dstKey, kws, aggregate) + } + + override def zinterstore(dstKey: Any, keys: Iterable[Any], + aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = + inSameNode((dstKey :: keys.toList): _*) { n => + n.zinterstore(dstKey, keys, aggregate) + } + + override def zinterstoreWeighted(dstKey: Any, kws: Iterable[Product2[Any, Double]], + aggregate: Aggregate = SUM)(implicit format: Format): Option[Long] = + inSameNode((dstKey :: kws.map(_._1).toList): _*) { n => + n.zinterstoreWeighted(dstKey, kws, aggregate) + } + + // todo: implement + override def zrangebylex[A](key: Any, min: String, max: String, limit: Option[(Int, Int)])(implicit format: Format, parse: Parse[A]): Option[List[A]] = ??? + + // todo: implement + override def zscan[A](key: Any, cursor: Int, pattern: Any, count: Int)(implicit format: Format, parse: Parse[A]): Option[(Option[Int], Option[List[Option[A]]])] = ??? +} diff --git a/src/main/scala/com/redis/cluster/StringOps.scala b/src/main/scala/com/redis/cluster/StringOps.scala new file mode 100644 index 00000000..6b9b601d --- /dev/null +++ b/src/main/scala/com/redis/cluster/StringOps.scala @@ -0,0 +1,84 @@ +package com.redis.cluster + +import com.redis +import com.redis.api.StringApi +import com.redis.serialization.{Format, Parse} + +trait StringOps extends StringApi { + rc: RedisClusterOps => + + override def set(key: Any, value: Any)(implicit format: Format): Boolean = + processForKey(key)(_.set(key, value)) + + override def set(key: Any, value: Any, onlyIfExists: Boolean, time: redis.SecondsOrMillis): Boolean = + processForKey(key)(_.set(key, value, onlyIfExists, time)) + + override def get[A](key: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + processForKey(key)(_.get(key)) + + override def getset[A](key: Any, value: Any)(implicit format: Format, parse: Parse[A]): Option[A] = + processForKey(key)(_.getset(key, value)) + + override def setnx(key: Any, value: Any)(implicit format: Format): Boolean = + processForKey(key)(_.setnx(key, value)) + + override def setex(key: Any, expiry: Long, value: Any)(implicit format: Format): Boolean = + processForKey(key)(_.setex(key, expiry, value)) + + override def psetex(key: Any, expiryInMillis: Long, value: Any)(implicit format: Format): Boolean = + processForKey(key)(_.psetex(key, expiryInMillis, value)) + + override def incr(key: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.incr(key)) + + override def incrby(key: Any, increment: Long)(implicit format: Format): Option[Long] = + processForKey(key)(_.incrby(key, increment)) + + override def incrbyfloat(key: Any, increment: Float)(implicit format: Format): Option[Float] = + processForKey(key)(_.incrbyfloat(key, increment)) + + override def decr(key: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.decr(key)) + + override def decrby(key: Any, increment: Long)(implicit format: Format): Option[Long] = + processForKey(key)(_.decrby(key, increment)) + + override def mget[A](key: Any, keys: Any*)(implicit format: Format, parse: Parse[A]): Option[List[Option[A]]] = { + val keylist = (key :: keys.toList) + val kvs = for { + (n, ks) <- keylist.groupBy(nodeForKey) + vs <- n.withClient(_.mget[A](ks.head, ks.tail: _*).toList) + kv <- (ks).zip(vs) 
+ } yield kv + Some(keylist.map(kvs)) + } + + override def mset(kvs: (Any, Any)*)(implicit format: Format): Boolean = + kvs.toList.map { case (k, v) => set(k, v) }.forall(_ == true) + + override def msetnx(kvs: (Any, Any)*)(implicit format: Format): Boolean = + kvs.toList.map { case (k, v) => setnx(k, v) }.forall(_ == true) + + override def setrange(key: Any, offset: Int, value: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.setrange(key, offset, value)) + + override def getrange[A](key: Any, start: Int, end: Int)(implicit format: Format, parse: Parse[A]): Option[A] = + processForKey(key)(_.getrange(key, start, end)) + + override def strlen(key: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.strlen(key)) + + override def append(key: Any, value: Any)(implicit format: Format): Option[Long] = + processForKey(key)(_.append(key, value)) + + override def getbit(key: Any, offset: Int)(implicit format: Format): Option[Int] = + processForKey(key)(_.getbit(key, offset)) + + override def setbit(key: Any, offset: Int, value: Any)(implicit format: Format): Option[Int] = + processForKey(key)(_.setbit(key, offset, value)) + + override def bitop(op: String, destKey: Any, srcKeys: Any*)(implicit format: Format): Option[Int] = ??? // todo: implement + + override def bitcount(key: Any, range: Option[(Int, Int)])(implicit format: Format): Option[Int] = + processForKey(key)(_.bitcount(key, range)) +} diff --git a/src/test/scala/com/redis/cluster/RedisClusterSpec.scala b/src/test/scala/com/redis/cluster/RedisClusterSpec.scala index 861d8484..48d76ba2 100644 --- a/src/test/scala/com/redis/cluster/RedisClusterSpec.scala +++ b/src/test/scala/com/redis/cluster/RedisClusterSpec.scala @@ -6,8 +6,8 @@ import org.scalatest.BeforeAndAfterAll import org.scalatest.Matchers import org.scalatest.junit.JUnitRunner import org.junit.runner.RunWith -import com.redis.RedisClient -import com.redis.serialization.Format +import com.redis.{GeoRadiusMember, RedisClient} +import com.redis.serialization.{Format, Parse} import collection.mutable.WrappedArray diff --git a/src/test/scala/com/redis/cluster/RedisShardsSpec.scala b/src/test/scala/com/redis/cluster/RedisShardsSpec.scala index a16b00b7..4a1da10a 100644 --- a/src/test/scala/com/redis/cluster/RedisShardsSpec.scala +++ b/src/test/scala/com/redis/cluster/RedisShardsSpec.scala @@ -1,14 +1,10 @@ package com.redis.cluster -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith import com.redis.RedisClient import com.redis.serialization.Format -import collection.mutable.WrappedArray +import org.junit.runner.RunWith +import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSpec, Matchers} +import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) From 4f1150e927cd5db0f52dfceae0d0773848308a06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Siatkowski?= Date: Thu, 8 Aug 2019 11:54:16 +0200 Subject: [PATCH 2/4] common cleanup db trait for integration tests --- src/main/scala/com/redis/RedisClient.scala | 33 +++++++++++-------- src/main/scala/com/redis/ds/Deque.scala | 2 ++ src/test/scala/com/redis/DequeSpec.scala | 23 +++---------- .../scala/com/redis/EvalOperationsSpec.scala | 22 +++---------- .../scala/com/redis/GeoOperationsSpec.scala | 25 ++------------ .../scala/com/redis/HashOperationsSpec.scala | 22 +++---------- 
.../com/redis/HyperLogLogOperationsSpec.scala | 17 ++-------- .../scala/com/redis/ListOperationsSpec.scala | 21 +++--------- src/test/scala/com/redis/OperationsSpec.scala | 22 +++---------- src/test/scala/com/redis/PipelineSpec.scala | 21 +++--------- .../scala/com/redis/SerializationSpec.scala | 25 +++----------- .../scala/com/redis/SetOperationsSpec.scala | 22 +++---------- .../com/redis/SortedSetOperationsSpec.scala | 15 ++------- .../com/redis/StringOperationsSpec.scala | 22 +++---------- .../com/redis/cluster/RedisClusterSpec.scala | 23 ++++--------- .../com/redis/cluster/RedisShardsSpec.scala | 12 ++----- src/test/scala/com/redis/common/IntSpec.scala | 31 +++++++++++++++++ 17 files changed, 107 insertions(+), 251 deletions(-) create mode 100644 src/test/scala/com/redis/common/IntSpec.scala diff --git a/src/main/scala/com/redis/RedisClient.scala b/src/main/scala/com/redis/RedisClient.scala index f3bc02a5..3876c133 100644 --- a/src/main/scala/com/redis/RedisClient.scala +++ b/src/main/scala/com/redis/RedisClient.scala @@ -36,7 +36,7 @@ trait Redis extends IO with Protocol { if (reconnect) send(command, args)(result) else throw e } - + def send[A](command: String)(result: => A): A = try { write(Commands.multiBulk(List(command.getBytes("UTF-8")))) result @@ -57,14 +57,14 @@ trait Redis extends IO with Protocol { def reconnect: Boolean = { disconnect && initialize } - + protected def initialize : Boolean } trait RedisCommand extends Redis with Operations with GeoOperations - with NodeOperations + with NodeOperations with StringOperations with ListOperations with SetOperations @@ -72,14 +72,15 @@ trait RedisCommand extends Redis with HashOperations with EvalOperations with PubOperations - with HyperLogLogOperations { + with HyperLogLogOperations + with AutoCloseable { val database: Int = 0 val secret: Option[Any] = None - + override def initialize : Boolean = { if(connect) { - secret.foreach {s => + secret.foreach {s => auth(s) } selectDatabase() @@ -88,7 +89,7 @@ trait RedisCommand extends Redis false } } - + private def selectDatabase(): Unit = { if (database != 0) select(database) @@ -97,9 +98,9 @@ trait RedisCommand extends Redis private def authenticate(): Unit = { secret.foreach(auth _) } - + } - + class RedisClient(override val host: String, override val port: Int, override val database: Int = 0, override val secret: Option[Any] = None, override val timeout : Int = 0) @@ -133,11 +134,11 @@ class RedisClient(override val host: String, override val port: Int, } send("EXEC")(asExec(pipelineClient.handlers)) } catch { - case e: RedisMultiExecException => + case e: RedisMultiExecException => None } } - + import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.{Future, Promise} import scala.util.Try @@ -176,10 +177,10 @@ class RedisClient(override val host: String, override val port: Int, val f = Future { commands.map {command => i = i + 1 - Try { - command() + Try { + command() } recover { - case ex: java.lang.Exception => + case ex: java.lang.Exception => ps(i) success ex } foreach {r => ps(i) success r @@ -223,5 +224,9 @@ class RedisClient(override val host: String, override val port: Int, override def readLine = parent.readLine override def readCounted(count: Int) = parent.readCounted(count) override def initialize = parent.initialize + + override def close(): Unit = parent.close() } + + override def close(): Unit = disconnect } diff --git a/src/main/scala/com/redis/ds/Deque.scala b/src/main/scala/com/redis/ds/Deque.scala index 35b11688..5a637ddc 100644 --- 
a/src/main/scala/com/redis/ds/Deque.scala +++ b/src/main/scala/com/redis/ds/Deque.scala @@ -85,5 +85,7 @@ class RedisDequeClient(val h: String, val p: Int, val d: Int = 0, val s: Option[ override val database = d override val secret = s initialize + + override def close(): Unit = disconnect } } diff --git a/src/test/scala/com/redis/DequeSpec.scala b/src/test/scala/com/redis/DequeSpec.scala index f6c90caa..74776f6a 100644 --- a/src/test/scala/com/redis/DequeSpec.scala +++ b/src/test/scala/com/redis/DequeSpec.scala @@ -1,33 +1,18 @@ package com.redis.ds -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner +import com.redis.common.IntSpec import org.junit.runner.RunWith +import org.scalatest.{FunSpec, Matchers} +import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class DequeSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisDequeClient("localhost", 6379).getDeque("td") - override def beforeEach = { - r.clear - } - - override def afterEach = { - r.clear - } - - override def afterAll = { - r.clear - } - describe("addFirst and check size and added element") { it("should add to the head of the deque") { r.addFirst("foo") should equal(Some(1)) diff --git a/src/test/scala/com/redis/EvalOperationsSpec.scala b/src/test/scala/com/redis/EvalOperationsSpec.scala index cc55e88d..f9fae758 100644 --- a/src/test/scala/com/redis/EvalOperationsSpec.scala +++ b/src/test/scala/com/redis/EvalOperationsSpec.scala @@ -1,32 +1,18 @@ package com.redis -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner +import com.redis.common.IntSpec import org.junit.runner.RunWith +import org.scalatest.{FunSpec, Matchers} +import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class EvalOperationsSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - describe("eval") { it("should eval lua code and get a string reply") { r.evalBulk[String]("return 'val1';", List(), List()) should be(Some("val1")) diff --git a/src/test/scala/com/redis/GeoOperationsSpec.scala b/src/test/scala/com/redis/GeoOperationsSpec.scala index 23ea5b26..00f8eea9 100644 --- a/src/test/scala/com/redis/GeoOperationsSpec.scala +++ b/src/test/scala/com/redis/GeoOperationsSpec.scala @@ -1,32 +1,13 @@ package com.redis -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith +import com.redis.common.IntSpec +import org.scalatest.{FunSpec, Matchers} -/** - * Created by alexis on 05/09/16. 
- */ class GeoOperationsSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - describe("geoadd") { it("should add values with their coordinates and return the added quantity") { val out = r.geoadd("Sicily", Seq(("13.361389", "38.115556", "Palermo"), ("15.087269", "37.502669", "Catania"))) diff --git a/src/test/scala/com/redis/HashOperationsSpec.scala b/src/test/scala/com/redis/HashOperationsSpec.scala index efe661b6..b0fc5704 100644 --- a/src/test/scala/com/redis/HashOperationsSpec.scala +++ b/src/test/scala/com/redis/HashOperationsSpec.scala @@ -1,32 +1,18 @@ package com.redis -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner +import com.redis.common.IntSpec import org.junit.runner.RunWith +import org.scalatest.{FunSpec, Matchers} +import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class HashOperationsSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - describe("hset") { it("should set and get fields") { r.hset("hash1", "field1", "val") diff --git a/src/test/scala/com/redis/HyperLogLogOperationsSpec.scala b/src/test/scala/com/redis/HyperLogLogOperationsSpec.scala index ed503c7d..b3d09aca 100644 --- a/src/test/scala/com/redis/HyperLogLogOperationsSpec.scala +++ b/src/test/scala/com/redis/HyperLogLogOperationsSpec.scala @@ -1,29 +1,18 @@ package com.redis +import com.redis.common.IntSpec import org.junit.runner.RunWith -import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSpec, Matchers} import org.scalatest.junit.JUnitRunner +import org.scalatest.{FunSpec, Matchers} @RunWith(classOf[JUnitRunner]) class HyperLogLogOperationsSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - describe("pfadd") { it("should return one for changed estimated cardinality") { r.pfadd("hll-updated-cardinality", "value1") should equal(Some(1)) diff --git a/src/test/scala/com/redis/ListOperationsSpec.scala b/src/test/scala/com/redis/ListOperationsSpec.scala index 8bf25f0c..2b767da0 100644 --- a/src/test/scala/com/redis/ListOperationsSpec.scala +++ b/src/test/scala/com/redis/ListOperationsSpec.scala @@ -1,32 +1,19 @@ package com.redis -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner +import com.redis.common.IntSpec import org.junit.runner.RunWith +import org.scalatest.{BeforeAndAfterEach, FunSpec, Matchers} +import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class ListOperationsSpec extends FunSpec with Matchers with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - 
r.disconnect - } - describe("lpush") { it("should add to the head of the list") { r.lpush("list-1", "foo") should equal(Some(1)) diff --git a/src/test/scala/com/redis/OperationsSpec.scala b/src/test/scala/com/redis/OperationsSpec.scala index be6b42d2..af53c935 100644 --- a/src/test/scala/com/redis/OperationsSpec.scala +++ b/src/test/scala/com/redis/OperationsSpec.scala @@ -1,32 +1,18 @@ package com.redis -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner +import com.redis.common.IntSpec import org.junit.runner.RunWith +import org.scalatest.{FunSpec, Matchers} +import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class OperationsSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - describe("keys") { it("should fetch keys") { r.set("anshin-1", "debasish") diff --git a/src/test/scala/com/redis/PipelineSpec.scala b/src/test/scala/com/redis/PipelineSpec.scala index ecd1b0f5..d10bc203 100644 --- a/src/test/scala/com/redis/PipelineSpec.scala +++ b/src/test/scala/com/redis/PipelineSpec.scala @@ -1,30 +1,19 @@ package com.redis +import com.redis.common.IntSpec +import org.junit.runner.RunWith import org.scalatest._ import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith @RunWith(classOf[JUnitRunner]) class PipelineSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll + with IntSpec with Inside { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - describe("pipeline1") { it("should do pipelined commands") { r.pipeline { p => @@ -93,9 +82,7 @@ class PipelineSpec extends FunSpec inside(res) { case List(true, Some(_), Some("debasish"), Some(_), None) => } } - import scala.concurrent.ExecutionContext.Implicits.global - import scala.concurrent.{Await, Future} - import scala.util.Success + import scala.concurrent.Await import scala.concurrent.duration._ describe("pipeline no multi 1") { diff --git a/src/test/scala/com/redis/SerializationSpec.scala b/src/test/scala/com/redis/SerializationSpec.scala index a7fa3eb2..0aee1f6a 100644 --- a/src/test/scala/com/redis/SerializationSpec.scala +++ b/src/test/scala/com/redis/SerializationSpec.scala @@ -1,33 +1,18 @@ package com.redis -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner +import com.redis.common.IntSpec +import com.redis.serialization._ import org.junit.runner.RunWith - -import serialization._ +import org.scalatest.{FunSpec, Matchers} +import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class SerializationSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - it("should not conflict when using all built in parsers") { import Parse.Implicits._ r.hmset("hash", Map("field1" -> "1", "field2" -> 2)) diff --git 
a/src/test/scala/com/redis/SetOperationsSpec.scala b/src/test/scala/com/redis/SetOperationsSpec.scala index deb40814..b8fa28c8 100644 --- a/src/test/scala/com/redis/SetOperationsSpec.scala +++ b/src/test/scala/com/redis/SetOperationsSpec.scala @@ -1,32 +1,18 @@ package com.redis -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner +import com.redis.common.IntSpec import org.junit.runner.RunWith +import org.scalatest.{FunSpec, Matchers} +import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class SetOperationsSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - describe("sadd") { it("should add a non-existent value to the set") { r.sadd("set-1", "foo").get should equal(1) diff --git a/src/test/scala/com/redis/SortedSetOperationsSpec.scala b/src/test/scala/com/redis/SortedSetOperationsSpec.scala index 1726e649..e628b00e 100644 --- a/src/test/scala/com/redis/SortedSetOperationsSpec.scala +++ b/src/test/scala/com/redis/SortedSetOperationsSpec.scala @@ -7,27 +7,16 @@ import org.scalatest.Matchers import org.scalatest.junit.JUnitRunner import org.junit.runner.RunWith import com.redis.RedisClient.{DESC, SUM} +import com.redis.common.IntSpec @RunWith(classOf[JUnitRunner]) class SortedSetOperationsSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - import r._ private def add = { diff --git a/src/test/scala/com/redis/StringOperationsSpec.scala b/src/test/scala/com/redis/StringOperationsSpec.scala index c842286f..b1405006 100644 --- a/src/test/scala/com/redis/StringOperationsSpec.scala +++ b/src/test/scala/com/redis/StringOperationsSpec.scala @@ -1,33 +1,19 @@ package com.redis import java.util.concurrent.TimeUnit -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner +import com.redis.common.IntSpec import org.junit.runner.RunWith +import org.scalatest.{FunSpec, Matchers} +import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class StringOperationsSpec extends FunSpec with Matchers -with BeforeAndAfterEach -with BeforeAndAfterAll { +with IntSpec { val r = new RedisClient("localhost", 6379) - override def beforeEach = { - } - - override def afterEach = { - r.flushdb - } - - override def afterAll = { - r.disconnect - } - describe("set") { it("should set key/value pairs") { r.set("anshin-1", "debasish") should equal(true) diff --git a/src/test/scala/com/redis/cluster/RedisClusterSpec.scala b/src/test/scala/com/redis/cluster/RedisClusterSpec.scala index 48d76ba2..918a1f17 100644 --- a/src/test/scala/com/redis/cluster/RedisClusterSpec.scala +++ b/src/test/scala/com/redis/cluster/RedisClusterSpec.scala @@ -1,34 +1,25 @@ package com.redis.cluster -import org.scalatest.FunSpec -import org.scalatest.BeforeAndAfterEach -import org.scalatest.BeforeAndAfterAll -import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner +import com.redis.RedisClient +import 
com.redis.common.IntSpec +import com.redis.serialization.Format import org.junit.runner.RunWith -import com.redis.{GeoRadiusMember, RedisClient} -import com.redis.serialization.{Format, Parse} +import org.scalatest.junit.JUnitRunner +import org.scalatest.{FunSpec, Matchers} -import collection.mutable.WrappedArray +import scala.collection.mutable.WrappedArray @RunWith(classOf[JUnitRunner]) class RedisClusterSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val nodes = Array(ClusterNode("node1", "localhost", 6379), ClusterNode("node2", "localhost", 6380), ClusterNode("node3", "localhost", 6381)) val r = new RedisCluster(new WrappedArray.ofRef(nodes).toSeq: _*) { val keyTag = Some(RegexKeyTag) } - override def beforeEach = {} - - override def afterEach = r.flushdb - - override def afterAll = r.close - def formattedKey(key: Any)(implicit format: Format) = { format(key) } diff --git a/src/test/scala/com/redis/cluster/RedisShardsSpec.scala b/src/test/scala/com/redis/cluster/RedisShardsSpec.scala index 4a1da10a..7d118c22 100644 --- a/src/test/scala/com/redis/cluster/RedisShardsSpec.scala +++ b/src/test/scala/com/redis/cluster/RedisShardsSpec.scala @@ -1,29 +1,23 @@ package com.redis.cluster import com.redis.RedisClient +import com.redis.common.IntSpec import com.redis.serialization.Format import org.junit.runner.RunWith -import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSpec, Matchers} import org.scalatest.junit.JUnitRunner +import org.scalatest.{FunSpec, Matchers} @RunWith(classOf[JUnitRunner]) class RedisShardsSpec extends FunSpec with Matchers - with BeforeAndAfterEach - with BeforeAndAfterAll { + with IntSpec { val nodes = List(ClusterNode("node1", "localhost", 6379), ClusterNode("node2", "localhost", 6380), ClusterNode("node3", "localhost", 6381)) val r = new RedisShards(nodes) { val keyTag = Some(RegexKeyTag) } - override def beforeEach = {} - - override def afterEach = r.flushdb - - override def afterAll = r.close - def formattedKey(key: Any)(implicit format: Format) = { format(key) } diff --git a/src/test/scala/com/redis/common/IntSpec.scala b/src/test/scala/com/redis/common/IntSpec.scala new file mode 100644 index 00000000..31e9b3b0 --- /dev/null +++ b/src/test/scala/com/redis/common/IntSpec.scala @@ -0,0 +1,31 @@ +package com.redis.common + +import com.redis.api.BaseApi +import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite} + +trait IntSpec extends BeforeAndAfterAll with BeforeAndAfterEach { + that: Suite => + + val r: BaseApi with AutoCloseable + + override def afterAll: Unit = { + r.flushall + r.close() + super.afterAll() + } + + override def beforeAll: Unit = { + super.beforeAll() + r.flushall + } + + override def beforeEach: Unit = { + super.beforeEach() + } + + override def afterEach: Unit = { + r.flushdb + super.afterEach() + } + +} From 493cb6849fa35773d85a62c857eb1e6513218508 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Siatkowski?= Date: Thu, 8 Aug 2019 12:03:39 +0200 Subject: [PATCH 3/4] removed junit --- build.sbt | 1 - src/test/scala/com/redis/BlockingDequeSpec.scala | 6 +----- src/test/scala/com/redis/DequeSpec.scala | 6 ++---- src/test/scala/com/redis/EvalOperationsSpec.scala | 4 +--- src/test/scala/com/redis/HashOperationsSpec.scala | 6 ++---- src/test/scala/com/redis/HyperLogLogOperationsSpec.scala | 4 +--- src/test/scala/com/redis/ListOperationsSpec.scala | 6 ++---- src/test/scala/com/redis/OperationsSpec.scala | 6 ++---- src/test/scala/com/redis/PatternsSpec.scala | 5 +---- 
src/test/scala/com/redis/PipelineSpec.scala | 4 +--- src/test/scala/com/redis/PoolSpec.scala | 5 +---- src/test/scala/com/redis/PubSubSpec.scala | 6 +----- src/test/scala/com/redis/RedisClientSpec.scala | 3 --- src/test/scala/com/redis/SerializationSpec.scala | 6 ++---- src/test/scala/com/redis/SetOperationsSpec.scala | 4 +--- src/test/scala/com/redis/SortedSetOperationsSpec.scala | 6 ++---- src/test/scala/com/redis/StringOperationsSpec.scala | 4 +--- src/test/scala/com/redis/WatchSpec.scala | 6 +----- src/test/scala/com/redis/cluster/RedisClusterSpec.scala | 6 ++---- src/test/scala/com/redis/cluster/RedisShardsSpec.scala | 6 ++---- 20 files changed, 26 insertions(+), 74 deletions(-) diff --git a/build.sbt b/build.sbt index 58472bf3..0e1ec9d8 100644 --- a/build.sbt +++ b/build.sbt @@ -22,7 +22,6 @@ lazy val coreSettings = commonSettings ++ Seq( "org.slf4j" % "slf4j-api" % "1.7.26", "org.slf4j" % "slf4j-log4j12" % "1.7.26" % "provided", "log4j" % "log4j" % "1.2.17" % "provided", - "junit" % "junit" % "4.12" % "test", "org.scalatest" %% "scalatest" % "3.0.8" % "test"), parallelExecution in Test := false, diff --git a/src/test/scala/com/redis/BlockingDequeSpec.scala b/src/test/scala/com/redis/BlockingDequeSpec.scala index f6fec634..9624ff9a 100644 --- a/src/test/scala/com/redis/BlockingDequeSpec.scala +++ b/src/test/scala/com/redis/BlockingDequeSpec.scala @@ -4,12 +4,8 @@ import org.scalatest.FunSpec import org.scalatest.BeforeAndAfterEach import org.scalatest.BeforeAndAfterAll import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith - -@RunWith(classOf[JUnitRunner]) -class BlockingDequeSpec extends FunSpec +class BlockingDequeSpec extends FunSpec with Matchers with BeforeAndAfterEach with BeforeAndAfterAll { diff --git a/src/test/scala/com/redis/DequeSpec.scala b/src/test/scala/com/redis/DequeSpec.scala index 74776f6a..259d8cd1 100644 --- a/src/test/scala/com/redis/DequeSpec.scala +++ b/src/test/scala/com/redis/DequeSpec.scala @@ -1,13 +1,11 @@ package com.redis.ds import com.redis.common.IntSpec -import org.junit.runner.RunWith import org.scalatest.{FunSpec, Matchers} -import org.scalatest.junit.JUnitRunner -@RunWith(classOf[JUnitRunner]) -class DequeSpec extends FunSpec + +class DequeSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/EvalOperationsSpec.scala b/src/test/scala/com/redis/EvalOperationsSpec.scala index f9fae758..236b97e5 100644 --- a/src/test/scala/com/redis/EvalOperationsSpec.scala +++ b/src/test/scala/com/redis/EvalOperationsSpec.scala @@ -1,12 +1,10 @@ package com.redis import com.redis.common.IntSpec -import org.junit.runner.RunWith import org.scalatest.{FunSpec, Matchers} -import org.scalatest.junit.JUnitRunner -@RunWith(classOf[JUnitRunner]) + class EvalOperationsSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/HashOperationsSpec.scala b/src/test/scala/com/redis/HashOperationsSpec.scala index b0fc5704..6a0b1270 100644 --- a/src/test/scala/com/redis/HashOperationsSpec.scala +++ b/src/test/scala/com/redis/HashOperationsSpec.scala @@ -1,13 +1,11 @@ package com.redis import com.redis.common.IntSpec -import org.junit.runner.RunWith import org.scalatest.{FunSpec, Matchers} -import org.scalatest.junit.JUnitRunner -@RunWith(classOf[JUnitRunner]) -class HashOperationsSpec extends FunSpec + +class HashOperationsSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/HyperLogLogOperationsSpec.scala 
b/src/test/scala/com/redis/HyperLogLogOperationsSpec.scala index b3d09aca..91915e22 100644 --- a/src/test/scala/com/redis/HyperLogLogOperationsSpec.scala +++ b/src/test/scala/com/redis/HyperLogLogOperationsSpec.scala @@ -1,12 +1,10 @@ package com.redis import com.redis.common.IntSpec -import org.junit.runner.RunWith -import org.scalatest.junit.JUnitRunner + import org.scalatest.{FunSpec, Matchers} -@RunWith(classOf[JUnitRunner]) class HyperLogLogOperationsSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/ListOperationsSpec.scala b/src/test/scala/com/redis/ListOperationsSpec.scala index 2b767da0..2b68e90d 100644 --- a/src/test/scala/com/redis/ListOperationsSpec.scala +++ b/src/test/scala/com/redis/ListOperationsSpec.scala @@ -1,13 +1,11 @@ package com.redis import com.redis.common.IntSpec -import org.junit.runner.RunWith import org.scalatest.{BeforeAndAfterEach, FunSpec, Matchers} -import org.scalatest.junit.JUnitRunner -@RunWith(classOf[JUnitRunner]) -class ListOperationsSpec extends FunSpec + +class ListOperationsSpec extends FunSpec with Matchers with BeforeAndAfterEach with IntSpec { diff --git a/src/test/scala/com/redis/OperationsSpec.scala b/src/test/scala/com/redis/OperationsSpec.scala index af53c935..6baa448d 100644 --- a/src/test/scala/com/redis/OperationsSpec.scala +++ b/src/test/scala/com/redis/OperationsSpec.scala @@ -1,13 +1,11 @@ package com.redis import com.redis.common.IntSpec -import org.junit.runner.RunWith import org.scalatest.{FunSpec, Matchers} -import org.scalatest.junit.JUnitRunner -@RunWith(classOf[JUnitRunner]) -class OperationsSpec extends FunSpec + +class OperationsSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/PatternsSpec.scala b/src/test/scala/com/redis/PatternsSpec.scala index 016f24fa..b076876e 100644 --- a/src/test/scala/com/redis/PatternsSpec.scala +++ b/src/test/scala/com/redis/PatternsSpec.scala @@ -4,13 +4,10 @@ import org.scalatest.FunSpec import org.scalatest.BeforeAndAfterEach import org.scalatest.BeforeAndAfterAll import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith import Patterns._ -@RunWith(classOf[JUnitRunner]) -class PatternsSpec extends FunSpec +class PatternsSpec extends FunSpec with Matchers with BeforeAndAfterEach with BeforeAndAfterAll { diff --git a/src/test/scala/com/redis/PipelineSpec.scala b/src/test/scala/com/redis/PipelineSpec.scala index d10bc203..b8d51dac 100644 --- a/src/test/scala/com/redis/PipelineSpec.scala +++ b/src/test/scala/com/redis/PipelineSpec.scala @@ -1,12 +1,10 @@ package com.redis import com.redis.common.IntSpec -import org.junit.runner.RunWith import org.scalatest._ -import org.scalatest.junit.JUnitRunner -@RunWith(classOf[JUnitRunner]) + class PipelineSpec extends FunSpec with Matchers with IntSpec diff --git a/src/test/scala/com/redis/PoolSpec.scala b/src/test/scala/com/redis/PoolSpec.scala index b857203c..c95a75fb 100644 --- a/src/test/scala/com/redis/PoolSpec.scala +++ b/src/test/scala/com/redis/PoolSpec.scala @@ -4,15 +4,12 @@ import org.scalatest.FunSpec import org.scalatest.BeforeAndAfterEach import org.scalatest.BeforeAndAfterAll import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith import scala.concurrent._ import scala.concurrent.duration._ import ExecutionContext.Implicits.global -@RunWith(classOf[JUnitRunner]) -class PoolSpec extends FunSpec +class PoolSpec extends FunSpec with Matchers with BeforeAndAfterEach with 
BeforeAndAfterAll { diff --git a/src/test/scala/com/redis/PubSubSpec.scala b/src/test/scala/com/redis/PubSubSpec.scala index 9574cea0..c4c82821 100644 --- a/src/test/scala/com/redis/PubSubSpec.scala +++ b/src/test/scala/com/redis/PubSubSpec.scala @@ -4,12 +4,8 @@ import org.scalatest.FunSpec import org.scalatest.BeforeAndAfterEach import org.scalatest.BeforeAndAfterAll import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith - -@RunWith(classOf[JUnitRunner]) -class PubSubSpec extends FunSpec +class PubSubSpec extends FunSpec with Matchers with BeforeAndAfterEach with BeforeAndAfterAll { diff --git a/src/test/scala/com/redis/RedisClientSpec.scala b/src/test/scala/com/redis/RedisClientSpec.scala index 2a952ec2..8599a7ba 100644 --- a/src/test/scala/com/redis/RedisClientSpec.scala +++ b/src/test/scala/com/redis/RedisClientSpec.scala @@ -4,10 +4,7 @@ import java.net.URI import org.scalatest.FunSpec import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith -@RunWith(classOf[JUnitRunner]) class RedisClientSpec extends FunSpec with Matchers { diff --git a/src/test/scala/com/redis/SerializationSpec.scala b/src/test/scala/com/redis/SerializationSpec.scala index 0aee1f6a..ce6da770 100644 --- a/src/test/scala/com/redis/SerializationSpec.scala +++ b/src/test/scala/com/redis/SerializationSpec.scala @@ -2,12 +2,10 @@ package com.redis import com.redis.common.IntSpec import com.redis.serialization._ -import org.junit.runner.RunWith import org.scalatest.{FunSpec, Matchers} -import org.scalatest.junit.JUnitRunner -@RunWith(classOf[JUnitRunner]) -class SerializationSpec extends FunSpec + +class SerializationSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/SetOperationsSpec.scala b/src/test/scala/com/redis/SetOperationsSpec.scala index b8fa28c8..238e7e7f 100644 --- a/src/test/scala/com/redis/SetOperationsSpec.scala +++ b/src/test/scala/com/redis/SetOperationsSpec.scala @@ -1,12 +1,10 @@ package com.redis import com.redis.common.IntSpec -import org.junit.runner.RunWith import org.scalatest.{FunSpec, Matchers} -import org.scalatest.junit.JUnitRunner -@RunWith(classOf[JUnitRunner]) + class SetOperationsSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/SortedSetOperationsSpec.scala b/src/test/scala/com/redis/SortedSetOperationsSpec.scala index e628b00e..4af191a5 100644 --- a/src/test/scala/com/redis/SortedSetOperationsSpec.scala +++ b/src/test/scala/com/redis/SortedSetOperationsSpec.scala @@ -4,14 +4,12 @@ import org.scalatest.FunSpec import org.scalatest.BeforeAndAfterEach import org.scalatest.BeforeAndAfterAll import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith + import com.redis.RedisClient.{DESC, SUM} import com.redis.common.IntSpec -@RunWith(classOf[JUnitRunner]) -class SortedSetOperationsSpec extends FunSpec +class SortedSetOperationsSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/StringOperationsSpec.scala b/src/test/scala/com/redis/StringOperationsSpec.scala index b1405006..915bd1f8 100644 --- a/src/test/scala/com/redis/StringOperationsSpec.scala +++ b/src/test/scala/com/redis/StringOperationsSpec.scala @@ -2,12 +2,10 @@ package com.redis import java.util.concurrent.TimeUnit import com.redis.common.IntSpec -import org.junit.runner.RunWith import org.scalatest.{FunSpec, Matchers} -import org.scalatest.junit.JUnitRunner -@RunWith(classOf[JUnitRunner]) + 
class StringOperationsSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/WatchSpec.scala b/src/test/scala/com/redis/WatchSpec.scala index 787a96cb..7f1d2319 100644 --- a/src/test/scala/com/redis/WatchSpec.scala +++ b/src/test/scala/com/redis/WatchSpec.scala @@ -4,12 +4,8 @@ import org.scalatest.FunSpec import org.scalatest.BeforeAndAfterEach import org.scalatest.BeforeAndAfterAll import org.scalatest.Matchers -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith - -@RunWith(classOf[JUnitRunner]) -class WatchSpec extends FunSpec +class WatchSpec extends FunSpec with Matchers with BeforeAndAfterEach with BeforeAndAfterAll { diff --git a/src/test/scala/com/redis/cluster/RedisClusterSpec.scala b/src/test/scala/com/redis/cluster/RedisClusterSpec.scala index 918a1f17..9d40a96d 100644 --- a/src/test/scala/com/redis/cluster/RedisClusterSpec.scala +++ b/src/test/scala/com/redis/cluster/RedisClusterSpec.scala @@ -3,15 +3,13 @@ package com.redis.cluster import com.redis.RedisClient import com.redis.common.IntSpec import com.redis.serialization.Format -import org.junit.runner.RunWith -import org.scalatest.junit.JUnitRunner + import org.scalatest.{FunSpec, Matchers} import scala.collection.mutable.WrappedArray -@RunWith(classOf[JUnitRunner]) -class RedisClusterSpec extends FunSpec +class RedisClusterSpec extends FunSpec with Matchers with IntSpec { diff --git a/src/test/scala/com/redis/cluster/RedisShardsSpec.scala b/src/test/scala/com/redis/cluster/RedisShardsSpec.scala index 7d118c22..fdfdd376 100644 --- a/src/test/scala/com/redis/cluster/RedisShardsSpec.scala +++ b/src/test/scala/com/redis/cluster/RedisShardsSpec.scala @@ -3,13 +3,11 @@ package com.redis.cluster import com.redis.RedisClient import com.redis.common.IntSpec import com.redis.serialization.Format -import org.junit.runner.RunWith -import org.scalatest.junit.JUnitRunner + import org.scalatest.{FunSpec, Matchers} -@RunWith(classOf[JUnitRunner]) -class RedisShardsSpec extends FunSpec +class RedisShardsSpec extends FunSpec with Matchers with IntSpec { From 4c795a67c33dafac04fd0c5af6ff71034cc0e9c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Siatkowski?= Date: Fri, 16 Aug 2019 16:42:12 +0200 Subject: [PATCH 4/4] generalize cluster and shard tests to common class --- .../com/redis/cluster/RedisCluster.scala | 7 +- .../com/redis/cluster/RedisClusterOps.scala | 10 +- .../scala/com/redis/cluster/RedisShards.scala | 7 +- .../com/redis/cluster/WithHashRing.scala | 7 + .../cluster/CommonRedisClusterSpec.scala | 171 ++++++++++++++++++ .../com/redis/cluster/IntClusterSpec.scala | 19 ++ .../com/redis/cluster/RedisClusterSpec.scala | 145 +-------------- .../com/redis/cluster/RedisShardsSpec.scala | 138 +------------- 8 files changed, 219 insertions(+), 285 deletions(-) create mode 100644 src/main/scala/com/redis/cluster/WithHashRing.scala create mode 100644 src/test/scala/com/redis/cluster/CommonRedisClusterSpec.scala create mode 100644 src/test/scala/com/redis/cluster/IntClusterSpec.scala diff --git a/src/main/scala/com/redis/cluster/RedisCluster.scala b/src/main/scala/com/redis/cluster/RedisCluster.scala index 7b322250..c3e019c4 100644 --- a/src/main/scala/com/redis/cluster/RedisCluster.scala +++ b/src/main/scala/com/redis/cluster/RedisCluster.scala @@ -60,6 +60,7 @@ case class ClusterNode(nodename: String, host: String, port: Int, database: Int abstract class RedisCluster(hosts: ClusterNode*) extends RedisClusterOps + with WithHashRing[IdentifiableRedisClientPool] with BaseOps with 
NodeOps with StringOps @@ -72,12 +73,12 @@ abstract class RedisCluster(hosts: ClusterNode*) with HashOps { // instantiating a cluster will automatically connect participating nodes to the server - val clients: List[IdentifiableRedisClientPool] = hosts.toList.map { h => + protected[cluster] val clients: List[IdentifiableRedisClientPool] = hosts.toList.map { h => new IdentifiableRedisClientPool(h) } // the hash ring will instantiate with the nodes up and added - val hr: HashRing[IdentifiableRedisClientPool] = HashRing[IdentifiableRedisClientPool](clients, POINTS_PER_SERVER) + override protected[cluster] val hr: HashRing[IdentifiableRedisClientPool] = HashRing[IdentifiableRedisClientPool](clients, POINTS_PER_SERVER) override def nodeForKey(key: Any)(implicit format: Format): IdentifiableRedisClientPool = { val bKey = format(key) @@ -109,7 +110,7 @@ abstract class RedisCluster(hosts: ClusterNode*) hr.cluster.map(_.node).toList } - override def onAllConns[T](body: RedisClient => T): Iterable[T] = + override protected[cluster] def onAllConns[T](body: RedisClient => T): Iterable[T] = hr.cluster.map(p => p.withClient { client => body(client) }) // .forall(_ == true) def close(): Unit = hr.cluster.map(_.close) diff --git a/src/main/scala/com/redis/cluster/RedisClusterOps.scala b/src/main/scala/com/redis/cluster/RedisClusterOps.scala index 9e7d0753..1c58dc0e 100644 --- a/src/main/scala/com/redis/cluster/RedisClusterOps.scala +++ b/src/main/scala/com/redis/cluster/RedisClusterOps.scala @@ -7,14 +7,14 @@ trait RedisClusterOps extends AutoCloseable { val keyTag: Option[KeyTag] - protected val POINTS_PER_SERVER = 160 // default in libmemcached + protected[cluster] val POINTS_PER_SERVER = 160 // default in libmemcached /** * get node for the key */ - protected def nodeForKey(key: Any)(implicit format: Format): RedisClientPool + protected[cluster] def nodeForKey(key: Any)(implicit format: Format): RedisClientPool - protected def onAllConns[T](body: RedisClient => T): Iterable[T] + protected[cluster] def onAllConns[T](body: RedisClient => T): Iterable[T] /** * add server to internal pool @@ -43,11 +43,11 @@ trait RedisClusterOps extends AutoCloseable { */ def listServers: List[ClusterNode] - def processForKey[T](key: Any)(body: RedisCommand => T)(implicit format: Format): T = { + protected[cluster] def processForKey[T](key: Any)(body: RedisCommand => T)(implicit format: Format): T = { nodeForKey(key).withClient(body(_)) } - def inSameNode[T](keys: Any*)(body: RedisClient => T)(implicit format: Format): T = { + protected[cluster] def inSameNode[T](keys: Any*)(body: RedisClient => T)(implicit format: Format): T = { val nodes = keys.toList.map(nodeForKey(_)) if (nodes.forall(_ == nodes.head)) { nodes.head.withClient(body(_)) diff --git a/src/main/scala/com/redis/cluster/RedisShards.scala b/src/main/scala/com/redis/cluster/RedisShards.scala index 50d30e22..aa3eda39 100644 --- a/src/main/scala/com/redis/cluster/RedisShards.scala +++ b/src/main/scala/com/redis/cluster/RedisShards.scala @@ -5,6 +5,7 @@ import com.redis.serialization._ abstract class RedisShards(val hosts: List[ClusterNode]) extends RedisClusterOps + with WithHashRing[String] with BaseOps with NodeOps with StringOps @@ -21,9 +22,9 @@ abstract class RedisShards(val hosts: List[ClusterNode]) private var clients = hosts.map { h => (h.nodename, new IdentifiableRedisClientPool(h)) } toMap // the hash ring will instantiate with the nodes up and added - val hr: HashRing[String] = HashRing[String](hosts.map(_.nodename), POINTS_PER_SERVER) + override 
protected[cluster] val hr: HashRing[String] = HashRing[String](hosts.map(_.nodename), POINTS_PER_SERVER) - override def nodeForKey(key: Any)(implicit format: Format): RedisClientPool = { + override protected[cluster] def nodeForKey(key: Any)(implicit format: Format): RedisClientPool = { val bKey = format(key) val selectedNode = hr.getNode(keyTag.flatMap(_.tag(bKey.toIndexedSeq)).getOrElse(bKey.toIndexedSeq)) clients(selectedNode) @@ -55,7 +56,7 @@ abstract class RedisShards(val hosts: List[ClusterNode]) clients.values.map(_.node).toList } - override def onAllConns[T](body: RedisClient => T): Iterable[T] = + override protected[cluster] def onAllConns[T](body: RedisClient => T): Iterable[T] = clients.values.map(p => p.withClient { client => body(client) }) // .forall(_ == true) def close(): Unit = clients.values.map(_.close) diff --git a/src/main/scala/com/redis/cluster/WithHashRing.scala b/src/main/scala/com/redis/cluster/WithHashRing.scala new file mode 100644 index 00000000..12e4bd78 --- /dev/null +++ b/src/main/scala/com/redis/cluster/WithHashRing.scala @@ -0,0 +1,7 @@ +package com.redis.cluster + +trait WithHashRing[T] { + + protected[cluster] val hr: HashRing[T] + +} diff --git a/src/test/scala/com/redis/cluster/CommonRedisClusterSpec.scala b/src/test/scala/com/redis/cluster/CommonRedisClusterSpec.scala new file mode 100644 index 00000000..5c64130a --- /dev/null +++ b/src/test/scala/com/redis/cluster/CommonRedisClusterSpec.scala @@ -0,0 +1,171 @@ +package com.redis.cluster + +import com.redis.RedisClient +import com.redis.api.BaseApi +import org.scalatest.{Assertion, FunSpec, Matchers} + +import scala.collection.mutable.ArrayBuffer + +// todo: remove, test every API separately +trait CommonRedisClusterSpec[A] extends FunSpec with Matchers with IntClusterSpec { + + type SuperCluster = BaseApi with AutoCloseable with RedisClusterOps with BaseOps with NodeOps with StringOps + with ListOps with SetOps with SortedSetOps with HashOps with WithHashRing[A] + + override val r: SuperCluster = rProvider() + + def rProvider(): SuperCluster + + describe("cluster operations") { + shouldSet() + shouldGetKeysFromProperNodes() + shouldDoAllOpsOnTheCluster() + shouldMGet() + shouldListOps() + shouldKeyTags() + shouldReplaceNode() + shouldRemoveNode() + shouldListNodes() + } + + def shouldSet(): Unit = { + it("should set") { + val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") + + // last 3 should map to the same node + l.map(r.nodeForKey(_)).reverse.slice(0, 3).forall(_.toString == "node2") should equal(true) + + // set + l foreach (s => r.processForKey(s)(_.set(s, "working in anshin")) should equal(true)) + + // check get: should return all 5 + r.keys("*").get.size should equal(5) + } + } + + def shouldGetKeysFromProperNodes(): Unit = { + it("should get keys from proper nodes") { + val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") + + // set + l foreach (s => r.processForKey(s)(_.set(s, s + " is working in anshin")) should equal(true)) + + r.get("debasish").get should equal("debasish is working in anshin") + r.get("maulindu").get should equal("maulindu is working in anshin") + l.map(r.get(_).get).distinct.size should equal(5) + } + } + + def shouldDoAllOpsOnTheCluster(): Unit = { + it("should do all operations on the cluster") { + val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") + + // set + l foreach (s => r.processForKey(s)(_.set(s, s + " is working in anshin")) should equal(true)) + + 
r.dbsize.get should equal(5) + r.exists("debasish") should equal(true) + r.exists("maulindu") should equal(true) + r.exists("debasish-1") should equal(false) + + r.del("debasish", "nilanjan").get should equal(2) + r.dbsize.get should equal(3) + r.del("satire").get should equal(0) + } + } + + def shouldMGet(): Unit = { + it("mget on a cluster should fetch values in the same order as the keys") { + val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") + + // set + l foreach (s => r.processForKey(s)(_.set(s, s + " is working in anshin")) should equal(true)) + + // mget + r.mget(l.head, l.tail: _*).get.map(_.get.split(" ")(0)) should equal(l) + } + } + + def shouldListOps(): Unit = { + it("list operations should work on the cluster") { + r.lpush("java-virtual-machine-langs", "java") should equal(Some(1)) + r.lpush("java-virtual-machine-langs", "jruby") should equal(Some(2)) + r.lpush("java-virtual-machine-langs", "groovy") should equal(Some(3)) + r.lpush("java-virtual-machine-langs", "scala") should equal(Some(4)) + r.llen("java-virtual-machine-langs") should equal(Some(4)) + } + } + + def shouldKeyTags(): Unit = { + it("keytags should ensure mapping to the same server") { + r.lpush("java-virtual-machine-{langs}", "java") should equal(Some(1)) + r.lpush("java-virtual-machine-{langs}", "jruby") should equal(Some(2)) + r.lpush("java-virtual-machine-{langs}", "groovy") should equal(Some(3)) + r.lpush("java-virtual-machine-{langs}", "scala") should equal(Some(4)) + r.llen("java-virtual-machine-{langs}") should equal(Some(4)) + r.lpush("microsoft-platform-{langs}", "c++") should equal(Some(1)) + r.rpoplpush("java-virtual-machine-{langs}", "microsoft-platform-{langs}").get should equal("java") + r.llen("java-virtual-machine-{langs}") should equal(Some(3)) + r.llen("microsoft-platform-{langs}") should equal(Some(2)) + } + } + + def shouldReplaceNode(): Unit = { + it("replace node should not change hash ring order") { + val r = rProvider() + r.set("testkey1", "testvalue2") + r.get("testkey1") should equal(Some("testvalue2")) + + val nodename = r.hr.getNode(formattedKey("testkey1").toIndexedSeq).toString + + //simulate the same value is duplicated to slave + //for test, don't set to master, just to make sure the expected value is loaded from slave + val redisClient = new RedisClient("localhost", 6382) + redisClient.set("testkey1", "testvalue1") + + //replaced master with slave on the same node + r.replaceServer(ClusterNode(nodename, "localhost", 6382)) + r.nodeForKey("testkey1").port should equal(6382) + + // todo: special check for RedisCluster + specialClusterCheck(r.hr.cluster, nodename) + r.get("testkey1") should equal(Some("testvalue1")) + + //switch back to master. 
the old value is loaded + val oldnode = nodes.filter(_.nodename.equals(nodename))(0) + r.replaceServer(oldnode) + r.get("testkey1") should equal(Some("testvalue2")) + r.close() + } + } + + def specialClusterCheck(cluster: ArrayBuffer[A], nodename: String): Assertion = succeed + + def shouldRemoveNode(): Unit = { + it("remove failure node should change hash ring order so that key on failure node should be served by other running nodes") { + val r = rProvider() + r.set("testkey1", "testvalue2") + r.get("testkey1") should equal(Some("testvalue2")) + + val nodename = r.hr.getNode(formattedKey("testkey1").toIndexedSeq).toString + + //replaced master with slave on the same node + r.removeServer(nodename) + r.get("testkey1") should equal(None) + + r.set("testkey1", "testvalue2") + r.get("testkey1") should equal(Some("testvalue2")) + r.close() + } + } + + def shouldListNodes(): Unit = { + it("list nodes should return the running nodes but not configured nodes") { + val r = rProvider() + r.listServers.toSet should equal(nodes.toSet) + r.removeServer("node1") + r.listServers.toSet should equal(nodes.filterNot(_.nodename.equals("node1")).toSet) + r.close() + } + } +} diff --git a/src/test/scala/com/redis/cluster/IntClusterSpec.scala b/src/test/scala/com/redis/cluster/IntClusterSpec.scala new file mode 100644 index 00000000..43add887 --- /dev/null +++ b/src/test/scala/com/redis/cluster/IntClusterSpec.scala @@ -0,0 +1,19 @@ +package com.redis.cluster + +import com.redis.common.IntSpec +import com.redis.serialization.Format +import org.scalatest.Suite + +trait IntClusterSpec extends IntSpec { + that: Suite => + + val nodes = List( + ClusterNode("node1", "localhost", 6379), + ClusterNode("node2", "localhost", 6380), + ClusterNode("node3", "localhost", 6381) + ) + + def formattedKey(key: Any)(implicit format: Format): Array[Byte] = { + format(key) + } +} diff --git a/src/test/scala/com/redis/cluster/RedisClusterSpec.scala b/src/test/scala/com/redis/cluster/RedisClusterSpec.scala index 9d40a96d..a6b41f35 100644 --- a/src/test/scala/com/redis/cluster/RedisClusterSpec.scala +++ b/src/test/scala/com/redis/cluster/RedisClusterSpec.scala @@ -1,148 +1,17 @@ package com.redis.cluster -import com.redis.RedisClient -import com.redis.common.IntSpec -import com.redis.serialization.Format +import com.redis.IdentifiableRedisClientPool +import org.scalatest.Assertion -import org.scalatest.{FunSpec, Matchers} +import scala.collection.mutable.ArrayBuffer -import scala.collection.mutable.WrappedArray +class RedisClusterSpec extends CommonRedisClusterSpec[IdentifiableRedisClientPool] { -class RedisClusterSpec extends FunSpec - with Matchers - with IntSpec { - - val nodes = Array(ClusterNode("node1", "localhost", 6379), ClusterNode("node2", "localhost", 6380), ClusterNode("node3", "localhost", 6381)) - val r = new RedisCluster(new WrappedArray.ofRef(nodes).toSeq: _*) { + override def rProvider(): SuperCluster = new RedisCluster(nodes: _*) { val keyTag = Some(RegexKeyTag) } - def formattedKey(key: Any)(implicit format: Format) = { - format(key) - } - - describe("cluster operations") { - it("should set") { - val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") - - // last 3 should map to the same node - l.map(r.nodeForKey(_)).reverse.slice(0, 3).forall(_.toString == "node2") should equal(true) - - // set - l foreach (s => r.processForKey(s)(_.set(s, "working in anshin")) should equal(true)) - - // check get: should return all 5 - r.keys("*").get.size should equal(5) - } - - it("should get 
keys from proper nodes") { - val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") - - // set - l foreach (s => r.processForKey(s)(_.set(s, s + " is working in anshin")) should equal(true)) - - r.get("debasish").get should equal("debasish is working in anshin") - r.get("maulindu").get should equal("maulindu is working in anshin") - l.map(r.get(_).get).distinct.size should equal(5) - } - - it("should do all operations on the cluster") { - val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") - - // set - l foreach (s => r.processForKey(s)(_.set(s, s + " is working in anshin")) should equal(true)) - - r.dbsize.get should equal(5) - r.exists("debasish") should equal(true) - r.exists("maulindu") should equal(true) - r.exists("debasish-1") should equal(false) - - r.del("debasish", "nilanjan").get should equal(2) - r.dbsize.get should equal(3) - r.del("satire").get should equal(0) - } - - it("mget on a cluster should fetch values in the same order as the keys") { - val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") - - // set - l foreach (s => r.processForKey(s)(_.set(s, s + " is working in anshin")) should equal(true)) - - // mget - r.mget(l.head, l.tail: _*).get.map(_.get.split(" ")(0)) should equal(l) - } - - it("list operations should work on the cluster"){ - r.lpush("java-virtual-machine-langs", "java") should equal(Some(1)) - r.lpush("java-virtual-machine-langs", "jruby") should equal(Some(2)) - r.lpush("java-virtual-machine-langs", "groovy") should equal(Some(3)) - r.lpush("java-virtual-machine-langs", "scala") should equal(Some(4)) - r.llen("java-virtual-machine-langs") should equal(Some(4)) - } - - it("keytags should ensure mapping to the same server"){ - r.lpush("java-virtual-machine-{langs}", "java") should equal(Some(1)) - r.lpush("java-virtual-machine-{langs}", "jruby") should equal(Some(2)) - r.lpush("java-virtual-machine-{langs}", "groovy") should equal(Some(3)) - r.lpush("java-virtual-machine-{langs}", "scala") should equal(Some(4)) - r.llen("java-virtual-machine-{langs}") should equal(Some(4)) - r.lpush("microsoft-platform-{langs}", "c++") should equal(Some(1)) - r.rpoplpush("java-virtual-machine-{langs}", "microsoft-platform-{langs}").get should equal("java") - r.llen("java-virtual-machine-{langs}") should equal(Some(3)) - r.llen("microsoft-platform-{langs}") should equal(Some(2)) - } - - it("replace node should not change hash ring order"){ - val r = new RedisCluster(new WrappedArray.ofRef(nodes).toSeq: _*) { - val keyTag = Some(RegexKeyTag) - } - r.set("testkey1", "testvalue2") - r.get("testkey1") should equal (Some("testvalue2")) - - val nodename = r.hr.getNode(formattedKey("testkey1").toIndexedSeq).toString - - //simulate the same value is duplicated to slave - //for test, don't set to master, just to make sure the expected value is loaded from slave - val redisClient = new RedisClient("localhost", 6382) - redisClient.set("testkey1", "testvalue1") - - //replaced master with slave on the same node - r.replaceServer(ClusterNode(nodename, "localhost", 6382)) - r.nodeForKey("testkey1").port should equal (6382) - r.hr.cluster.find(_.node.nodename.equals(nodename)).get.port should equal(6382) - r.get("testkey1") should equal (Some("testvalue1")) - - //switch back to master. 
the old value is loaded - val oldnode = nodes.filter(_.nodename.equals(nodename))(0) - r.replaceServer(oldnode) - r.get("testkey1") should equal (Some("testvalue2")) - } - - it("remove failure node should change hash ring order so that key on failure node should be served by other running nodes"){ - val r = new RedisCluster(new WrappedArray.ofRef(nodes).toSeq: _*) { - val keyTag = Some(RegexKeyTag) - } - r.set("testkey1", "testvalue2") - r.get("testkey1") should equal (Some("testvalue2")) - - val nodename = r.hr.getNode(formattedKey("testkey1").toIndexedSeq).toString - - //replaced master with slave on the same node - r.removeServer(nodename) - r.get("testkey1") should equal (None) - - r.set("testkey1", "testvalue2") - r.get("testkey1") should equal (Some("testvalue2")) - } - - it("list nodes should return the running nodes but not configured nodes"){ - val r = new RedisCluster(new WrappedArray.ofRef(nodes).toSeq: _*) { - val keyTag = Some(RegexKeyTag) - } - r.listServers.toSet should equal (nodes.toSet) - r.removeServer("node1") - r.listServers.toSet should equal (nodes.filterNot(_.nodename.equals("node1")).toSet) - } - } + override def specialClusterCheck(cluster: ArrayBuffer[IdentifiableRedisClientPool], nodename: String): Assertion = + cluster.find(_.node.nodename.equals(nodename)).get.port should equal(6382) } diff --git a/src/test/scala/com/redis/cluster/RedisShardsSpec.scala b/src/test/scala/com/redis/cluster/RedisShardsSpec.scala index fdfdd376..ba7b5370 100644 --- a/src/test/scala/com/redis/cluster/RedisShardsSpec.scala +++ b/src/test/scala/com/redis/cluster/RedisShardsSpec.scala @@ -1,144 +1,10 @@ package com.redis.cluster -import com.redis.RedisClient -import com.redis.common.IntSpec -import com.redis.serialization.Format -import org.scalatest.{FunSpec, Matchers} +class RedisShardsSpec extends CommonRedisClusterSpec[String] { - -class RedisShardsSpec extends FunSpec - with Matchers - with IntSpec { - - val nodes = List(ClusterNode("node1", "localhost", 6379), ClusterNode("node2", "localhost", 6380), ClusterNode("node3", "localhost", 6381)) - val r = new RedisShards(nodes) { + override def rProvider(): SuperCluster = new RedisShards(nodes) { val keyTag = Some(RegexKeyTag) } - def formattedKey(key: Any)(implicit format: Format) = { - format(key) - } - - describe("cluster operations") { - it("should set") { - val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") - - // last 3 should map to the same node - l.map(r.nodeForKey(_)).reverse.slice(0, 3).forall(_.toString == "node2") should equal(true) - - // set - l foreach (s => r.processForKey(s)(_.set(s, "working in anshin")) should equal(true)) - - // check get: should return all 5 - r.keys("*").get.size should equal(5) - } - - it("should get keys from proper nodes") { - val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") - - // set - l foreach (s => r.processForKey(s)(_.set(s, s + " is working in anshin")) should equal(true)) - - r.get("debasish").get should equal("debasish is working in anshin") - r.get("maulindu").get should equal("maulindu is working in anshin") - l.map(r.get(_).get).distinct.size should equal(5) - } - - it("should do all operations on the cluster") { - val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") - - // set - l foreach (s => r.processForKey(s)(_.set(s, s + " is working in anshin")) should equal(true)) - - r.dbsize.get should equal(5) - r.exists("debasish") should equal(true) - r.exists("maulindu") 
should equal(true) - r.exists("debasish-1") should equal(false) - - r.del("debasish", "nilanjan").get should equal(2) - r.dbsize.get should equal(3) - r.del("satire").get should equal(0) - } - - it("mget on a cluster should fetch values in the same order as the keys") { - val l = List("debasish", "maulindu", "ramanendu", "nilanjan", "tarun", "tarun", "tarun") - - // set - l foreach (s => r.processForKey(s)(_.set(s, s + " is working in anshin")) should equal(true)) - - // mget - r.mget(l.head, l.tail: _*).get.map(_.get.split(" ")(0)) should equal(l) - } - - it("list operations should work on the cluster"){ - r.lpush("java-virtual-machine-langs", "java") should equal(Some(1)) - r.lpush("java-virtual-machine-langs", "jruby") should equal(Some(2)) - r.lpush("java-virtual-machine-langs", "groovy") should equal(Some(3)) - r.lpush("java-virtual-machine-langs", "scala") should equal(Some(4)) - r.llen("java-virtual-machine-langs") should equal(Some(4)) - } - - it("keytags should ensure mapping to the same server"){ - r.lpush("java-virtual-machine-{langs}", "java") should equal(Some(1)) - r.lpush("java-virtual-machine-{langs}", "jruby") should equal(Some(2)) - r.lpush("java-virtual-machine-{langs}", "groovy") should equal(Some(3)) - r.lpush("java-virtual-machine-{langs}", "scala") should equal(Some(4)) - r.llen("java-virtual-machine-{langs}") should equal(Some(4)) - r.lpush("microsoft-platform-{langs}", "c++") should equal(Some(1)) - r.rpoplpush("java-virtual-machine-{langs}", "microsoft-platform-{langs}").get should equal("java") - r.llen("java-virtual-machine-{langs}") should equal(Some(3)) - r.llen("microsoft-platform-{langs}") should equal(Some(2)) - } - - it("replace node should not change hash ring order"){ - val r = new RedisShards(nodes) { - val keyTag = Some(RegexKeyTag) - } - r.set("testkey1", "testvalue2") - r.get("testkey1") should equal (Some("testvalue2")) - - val nodename = r.hr.getNode(formattedKey("testkey1").toIndexedSeq).toString - - //simulate the same value is duplicated to slave - //for test, don't set to master, just to make sure the expected value is loaded from slave - val redisClient = new RedisClient("localhost", 6382) - redisClient.set("testkey1", "testvalue1") - - //replaced master with slave on the same node - r.replaceServer(ClusterNode(nodename, "localhost", 6382)) - r.get("testkey1") should equal (Some("testvalue1")) - - //switch back to master. the old value is loaded - val oldnode = nodes.filter(_.nodename.equals(nodename))(0) - r.replaceServer(oldnode) - r.get("testkey1") should equal (Some("testvalue2")) - } - - it("remove failure node should change hash ring order so that key on failure node should be served by other running nodes"){ - val r = new RedisShards(nodes) { - val keyTag = Some(RegexKeyTag) - } - r.set("testkey1", "testvalue2") - r.get("testkey1") should equal (Some("testvalue2")) - - val nodename = r.hr.getNode(formattedKey("testkey1").toIndexedSeq).toString - - //replaced master with slave on the same node - r.removeServer(nodename) - r.get("testkey1") should equal (None) - - r.set("testkey1", "testvalue2") - r.get("testkey1") should equal (Some("testvalue2")) - } - - it("list nodes should return the running nodes but not configured nodes"){ - val r = new RedisShards(nodes) { - val keyTag = Some(RegexKeyTag) - } - r.listServers.toSet should equal (nodes.toSet) - r.removeServer("node1") - r.listServers.toSet should equal (nodes.filterNot(_.nodename.equals("node1")).toSet) - } - } }
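
Editor's note: to make the end state of this series easier to skim, below is a minimal, hypothetical sketch of the test pattern the patches converge on. The spec name, key, and value are illustrative only (not part of the patches); the lifecycle (flushdb after each test, flushall/close after all) comes from the IntSpec trait introduced above, and the suite only supplies the `r` client the trait requires, exactly as the migrated specs in this series do.

    package com.redis

    import com.redis.common.IntSpec
    import org.scalatest.{FunSpec, Matchers}

    // Hypothetical example spec illustrating the post-refactoring shape:
    // no per-suite beforeEach/afterEach/afterAll boilerplate and no JUnit runner.
    class ExampleOperationsSpec extends FunSpec
      with Matchers
      with IntSpec {

      // IntSpec declares `val r: BaseApi with AutoCloseable`; as in the other
      // specs of this series, a plain RedisClient satisfies that requirement.
      val r = new RedisClient("localhost", 6379)

      describe("example") {
        it("should round-trip a value through the server") {
          r.set("example-key", "example-value") should equal(true)
          r.get("example-key") should equal(Some("example-value"))
        }
      }
    }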