package nodb

import (
    "encoding/binary"
    "errors"
    "time"

    "github.com/lunny/nodb/store"
)
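
// This file implements the hash data type (HSet, HGet, HGetAll, HDel and
// friends) on top of the underlying key/value store.

// FVPair is a single field/value pair of a hash, as returned by HGetAll and
// accepted by HMset.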
type FVPair struct {
    Field []byte
    Value []byte
}

var errHashKey = errors.New("invalid hash key")
var errHSizeKey = errors.New("invalid hsize key")

const (
    hashStartSep byte = ':'
    hashStopSep  byte = hashStartSep + 1
)

func checkHashKFSize(key []byte, field []byte) error {
    if len(key) > MaxKeySize || len(key) == 0 {
        return errKeySize
    } else if len(field) > MaxHashFieldSize || len(field) == 0 {
        return errHashFieldSize
    }
    return nil
}
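
// Size keys produced by hEncodeSizeKey have the layout
//
//     [db.index][HSizeType][key]
//
// and store the number of fields in the hash (maintained by hIncrSize).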
func (db *DB) hEncodeSizeKey(key []byte) []byte {
    buf := make([]byte, len(key)+2)
    buf[0] = db.index
    buf[1] = HSizeType
    copy(buf[2:], key)
    return buf
}

func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) {
    if len(ek) < 2 || ek[0] != db.index || ek[1] != HSizeType {
        return nil, errHSizeKey
    }

    return ek[2:], nil
}
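
// Hash keys produced by hEncodeHashKey have the layout
//
//     [db.index][HashType][len(key) as uint16 big-endian][key][':'][field]
//
// The explicit key length and the ':' separator let hDecodeHashKey split an
// encoded key back into its key and field parts.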
func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte {
    buf := make([]byte, len(key)+len(field)+1+1+2+1)

    pos := 0
    buf[pos] = db.index
    pos++
    buf[pos] = HashType
    pos++

    binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
    pos += 2

    copy(buf[pos:], key)
    pos += len(key)

    buf[pos] = hashStartSep
    pos++

    copy(buf[pos:], field)
    return buf
}

func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) {
    if len(ek) < 5 || ek[0] != db.index || ek[1] != HashType {
        return nil, nil, errHashKey
    }

    pos := 2
    keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
    pos += 2

    if keyLen+5 > len(ek) {
        return nil, nil, errHashKey
    }

    key := ek[pos : pos+keyLen]
    pos += keyLen

    if ek[pos] != hashStartSep {
        return nil, nil, errHashKey
    }
    pos++

    field := ek[pos:]
    return key, field, nil
}
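
// hEncodeStartKey and hEncodeStopKey build the bounds used to range-scan all
// fields of one hash: the start key is the encoding of (key, empty field),
// ending in hashStartSep, and the stop key replaces that trailing separator
// with hashStopSep (':'+1), which sorts after every encoded field of the same
// hash. The scans below pass store.RangeROpen, which by its name denotes a
// right-open range, so the stop key itself is not visited.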
func (db *DB) hEncodeStartKey(key []byte) []byte {
    return db.hEncodeHashKey(key, nil)
}

func (db *DB) hEncodeStopKey(key []byte) []byte {
    k := db.hEncodeHashKey(key, nil)
    k[len(k)-1] = hashStopSep
    return k
}
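
// hSetItem stages the write of one field. It returns 1 when the field is
// newly created and 0 when an existing field is overwritten; the per-hash
// size counter is only incremented for new fields. The caller must hold the
// hash batch lock and commit the batch.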
func (db *DB) hSetItem(key []byte, field []byte, value []byte) (int64, error) {
    t := db.hashBatch

    ek := db.hEncodeHashKey(key, field)

    var n int64 = 1
    if v, _ := db.bucket.Get(ek); v != nil {
        n = 0
    } else {
        if _, err := db.hIncrSize(key, 1); err != nil {
            return 0, err
        }
    }

    t.Put(ek, value)
    return n, nil
}
// Note: hDelete only deletes the hash data itself; anything else, such as the
// expire entry, is ignored here.
func (db *DB) hDelete(t *batch, key []byte) int64 {
    sk := db.hEncodeSizeKey(key)
    start := db.hEncodeStartKey(key)
    stop := db.hEncodeStopKey(key)

    var num int64 = 0
    it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
    for ; it.Valid(); it.Next() {
        t.Delete(it.Key())
        num++
    }
    it.Close()

    t.Delete(sk)
    return num
}
func (db *DB) hExpireAt(key []byte, when int64) (int64, error) {
    t := db.hashBatch
    t.Lock()
    defer t.Unlock()

    if hlen, err := db.HLen(key); err != nil || hlen == 0 {
        return 0, err
    } else {
        db.expireAt(t, HashType, key, when)
        if err := t.Commit(); err != nil {
            return 0, err
        }
    }
    return 1, nil
}

func (db *DB) HLen(key []byte) (int64, error) {
    if err := checkKeySize(key); err != nil {
        return 0, err
    }

    return Int64(db.bucket.Get(db.hEncodeSizeKey(key)))
}
func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) {
    if err := checkHashKFSize(key, field); err != nil {
        return 0, err
    } else if err := checkValueSize(value); err != nil {
        return 0, err
    }

    t := db.hashBatch
    t.Lock()
    defer t.Unlock()

    n, err := db.hSetItem(key, field, value)
    if err != nil {
        return 0, err
    }

    // TODO: add binlog
    err = t.Commit()
    return n, err
}

func (db *DB) HGet(key []byte, field []byte) ([]byte, error) {
    if err := checkHashKFSize(key, field); err != nil {
        return nil, err
    }

    return db.bucket.Get(db.hEncodeHashKey(key, field))
}
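
// Example (illustrative sketch, not part of the original file): basic
// single-field usage of the hash API defined above. It assumes a *DB handle
// has been obtained elsewhere (e.g. via the package's Open/Select API); only
// methods declared in this file are used.
func exampleHashBasic(db *DB) error {
    key := []byte("myhash")

    // HSet returns 1 when the field is created, 0 when it is overwritten.
    if _, err := db.HSet(key, []byte("name"), []byte("nodb")); err != nil {
        return err
    }

    // HGet returns the stored value, or nil if the field does not exist
    // (compare the nil check in hSetItem).
    v, err := db.HGet(key, []byte("name"))
    if err != nil {
        return err
    }
    _ = v

    // HLen reports how many fields the hash currently holds.
    n, err := db.HLen(key)
    if err != nil {
        return err
    }
    _ = n

    return nil
}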
func (db *DB) HMset(key []byte, args ...FVPair) error {
    t := db.hashBatch
    t.Lock()
    defer t.Unlock()

    var err error
    var ek []byte
    var num int64 = 0
    for i := 0; i < len(args); i++ {
        if err := checkHashKFSize(key, args[i].Field); err != nil {
            return err
        } else if err := checkValueSize(args[i].Value); err != nil {
            return err
        }

        ek = db.hEncodeHashKey(key, args[i].Field)

        if v, err := db.bucket.Get(ek); err != nil {
            return err
        } else if v == nil {
            num++
        }

        t.Put(ek, args[i].Value)
    }

    if _, err = db.hIncrSize(key, num); err != nil {
        return err
    }

    // TODO: add binlog
    err = t.Commit()
    return err
}
func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) {
    var ek []byte

    it := db.bucket.NewIterator()
    defer it.Close()

    r := make([][]byte, len(args))
    for i := 0; i < len(args); i++ {
        if err := checkHashKFSize(key, args[i]); err != nil {
            return nil, err
        }

        ek = db.hEncodeHashKey(key, args[i])
        r[i] = it.Find(ek)
    }

    return r, nil
}

func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) {
    t := db.hashBatch

    var ek []byte
    var v []byte
    var err error

    t.Lock()
    defer t.Unlock()

    it := db.bucket.NewIterator()
    defer it.Close()

    var num int64 = 0
    for i := 0; i < len(args); i++ {
        if err := checkHashKFSize(key, args[i]); err != nil {
            return 0, err
        }

        ek = db.hEncodeHashKey(key, args[i])

        v = it.RawFind(ek)
        if v == nil {
            continue
        } else {
            num++
            t.Delete(ek)
        }
    }

    if _, err = db.hIncrSize(key, -num); err != nil {
        return 0, err
    }

    err = t.Commit()
    return num, err
}
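
// Example (illustrative sketch, not part of the original file): multi-field
// usage with HMset, HMget and HDel. As above, the *DB handle is assumed to
// come from the package's own open/select API.
func exampleHashMulti(db *DB) error {
    key := []byte("myhash")

    // HMset writes several field/value pairs in one batch commit.
    if err := db.HMset(key,
        FVPair{Field: []byte("a"), Value: []byte("1")},
        FVPair{Field: []byte("b"), Value: []byte("2")},
    ); err != nil {
        return err
    }

    // HMget returns one entry per requested field, in the same order.
    vals, err := db.HMget(key, []byte("a"), []byte("b"), []byte("missing"))
    if err != nil {
        return err
    }
    _ = vals

    // HDel returns how many of the given fields actually existed and were deleted.
    if _, err := db.HDel(key, []byte("a")); err != nil {
        return err
    }

    return nil
}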
func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) {
    t := db.hashBatch
    sk := db.hEncodeSizeKey(key)

    var err error
    var size int64 = 0
    if size, err = Int64(db.bucket.Get(sk)); err != nil {
        return 0, err
    } else {
        size += delta
        if size <= 0 {
            size = 0
            t.Delete(sk)
            db.rmExpire(t, HashType, key)
        } else {
            t.Put(sk, PutInt64(size))
        }
    }

    return size, nil
}
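
// HIncrBy parses the current field value with StrInt64 and writes the result
// back with StrPutInt64; judging by their names these helpers convert between
// decimal strings and int64, but they are defined elsewhere in the package.
// If StrInt64 cannot parse the stored value, the increment fails with that
// error.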
func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) {
    if err := checkHashKFSize(key, field); err != nil {
        return 0, err
    }

    t := db.hashBatch
    var ek []byte
    var err error

    t.Lock()
    defer t.Unlock()

    ek = db.hEncodeHashKey(key, field)

    var n int64 = 0
    if n, err = StrInt64(db.bucket.Get(ek)); err != nil {
        return 0, err
    }

    n += delta

    _, err = db.hSetItem(key, field, StrPutInt64(n))
    if err != nil {
        return 0, err
    }

    err = t.Commit()
    return n, err
}
func (db *DB) HGetAll(key []byte) ([]FVPair, error) {
    if err := checkKeySize(key); err != nil {
        return nil, err
    }

    start := db.hEncodeStartKey(key)
    stop := db.hEncodeStopKey(key)

    v := make([]FVPair, 0, 16)

    it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
    for ; it.Valid(); it.Next() {
        _, f, err := db.hDecodeHashKey(it.Key())
        if err != nil {
            return nil, err
        }

        v = append(v, FVPair{Field: f, Value: it.Value()})
    }
    it.Close()

    return v, nil
}
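
// HKeys and HValues walk the same [start, stop) range as HGetAll; HKeys
// collects only the decoded field names, HValues only the stored values.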
func (db *DB) HKeys(key []byte) ([][]byte, error) {
    if err := checkKeySize(key); err != nil {
        return nil, err
    }

    start := db.hEncodeStartKey(key)
    stop := db.hEncodeStopKey(key)

    v := make([][]byte, 0, 16)

    it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
    for ; it.Valid(); it.Next() {
        _, f, err := db.hDecodeHashKey(it.Key())
        if err != nil {
            return nil, err
        }

        v = append(v, f)
    }
    it.Close()

    return v, nil
}

func (db *DB) HValues(key []byte) ([][]byte, error) {
    if err := checkKeySize(key); err != nil {
        return nil, err
    }

    start := db.hEncodeStartKey(key)
    stop := db.hEncodeStopKey(key)

    v := make([][]byte, 0, 16)

    it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
    for ; it.Valid(); it.Next() {
        _, _, err := db.hDecodeHashKey(it.Key())
        if err != nil {
            return nil, err
        }

        v = append(v, it.Value())
    }
    it.Close()

    return v, nil
}
func (db *DB) HClear(key []byte) (int64, error) {
    if err := checkKeySize(key); err != nil {
        return 0, err
    }

    t := db.hashBatch
    t.Lock()
    defer t.Unlock()

    num := db.hDelete(t, key)
    db.rmExpire(t, HashType, key)

    err := t.Commit()
    return num, err
}
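
// HMclear clears several hashes in a single batch commit. On success it
// returns int64(len(keys)), i.e. the number of keys requested, not the number
// of keys that actually existed.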
func (db *DB) HMclear(keys ...[]byte) (int64, error) {
    t := db.hashBatch
    t.Lock()
    defer t.Unlock()

    for _, key := range keys {
        if err := checkKeySize(key); err != nil {
            return 0, err
        }

        db.hDelete(t, key)
        db.rmExpire(t, HashType, key)
    }

    err := t.Commit()
    return int64(len(keys)), err
}

func (db *DB) hFlush() (drop int64, err error) {
    t := db.hashBatch
    t.Lock()
    defer t.Unlock()

    return db.flushType(t, HashType)
}
func (db *DB) HScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
    return db.scan(HSizeType, key, count, inclusive, match)
}
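
// HExpire sets a TTL in seconds relative to time.Now(); HExpireAt takes an
// absolute Unix timestamp. Both delegate to hExpireAt, which returns 1 when a
// TTL was set and 0 when the hash is empty or does not exist.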
func (db *DB) HExpire(key []byte, duration int64) (int64, error) {
    if duration <= 0 {
        return 0, errExpireValue
    }

    return db.hExpireAt(key, time.Now().Unix()+duration)
}

func (db *DB) HExpireAt(key []byte, when int64) (int64, error) {
    if when <= time.Now().Unix() {
        return 0, errExpireValue
    }

    return db.hExpireAt(key, when)
}

func (db *DB) HTTL(key []byte) (int64, error) {
    if err := checkKeySize(key); err != nil {
        return -1, err
    }

    return db.ttl(HashType, key)
}

func (db *DB) HPersist(key []byte) (int64, error) {
    if err := checkKeySize(key); err != nil {
        return 0, err
    }

    t := db.hashBatch
    t.Lock()
    defer t.Unlock()

    n, err := db.rmExpire(t, HashType, key)
    if err != nil {
        return 0, err
    }

    err = t.Commit()
    return n, err
}