Update xorm to v1 (#323)
Fix limit for databases other than sqlite
go mod tidy && go mod vendor
Remove unneeded break statements
Make everything work with the new xorm version
Fix xorm logging
Fix lint
Fix redis init
Fix using id field
Fix database init for testing
Change default database log level
Add xorm logger
Use const for postgres
go mod tidy
Merge branch 'master' into update/xorm
# Conflicts:
#	go.mod
#	go.sum
#	vendor/modules.txt
go mod vendor
Fix loading fixtures for postgres
Go mod vendor1
Update xorm to version 1

Co-authored-by: kolaente <k@knt.li>
Reviewed-on: https://kolaente.dev/vikunja/api/pulls/323
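Several of the squashed commits above touch xorm's logging setup ("Fix xorm logging", "Add xorm logger", "Change default database log level"). As a rough sketch of the xorm v1 logger API those messages refer to — not Vikunja's actual wiring; the driver import and DSN are placeholders — the new xorm.io/xorm/log package is used roughly like this:

package main

import (
	"os"

	_ "github.com/lib/pq" // placeholder driver import
	"xorm.io/xorm"
	"xorm.io/xorm/log"
)

func main() {
	// Placeholder DSN; the real connection settings live in Vikunja's config.
	engine, err := xorm.NewEngine("postgres", "postgres://vikunja:secret@localhost/vikunja?sslmode=disable")
	if err != nil {
		panic(err)
	}

	// xorm v1 moved the logger types into xorm.io/xorm/log.
	engine.SetLogger(log.NewSimpleLogger(os.Stdout))
	engine.SetLogLevel(log.LOG_WARNING) // a quieter default log level
	engine.ShowSQL(false)
}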
vendor/xorm.io/xorm/caches/cache.go (generated, vendored; new file, 99 lines added)
@@ -0,0 +1,99 @@
// Copyright 2019 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package caches

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"strings"
	"time"

	"xorm.io/xorm/schemas"
)

const (
	// CacheExpired is default cache expired time
	CacheExpired = 60 * time.Minute
	// CacheMaxMemory is not use now
	CacheMaxMemory = 256
	// CacheGcInterval represents interval time to clear all expired nodes
	CacheGcInterval = 10 * time.Minute
	// CacheGcMaxRemoved represents max nodes removed when gc
	CacheGcMaxRemoved = 20
)

// list all the errors
var (
	ErrCacheMiss = errors.New("xorm/cache: key not found")
	ErrNotStored = errors.New("xorm/cache: not stored")
	// ErrNotExist record does not exist error
	ErrNotExist = errors.New("Record does not exist")
)

// CacheStore is a interface to store cache
type CacheStore interface {
	// key is primary key or composite primary key
	// value is struct's pointer
	// key format : <tablename>-p-<pk1>-<pk2>...
	Put(key string, value interface{}) error
	Get(key string) (interface{}, error)
	Del(key string) error
}

// Cacher is an interface to provide cache
// id format : u-<pk1>-<pk2>...
type Cacher interface {
	GetIds(tableName, sql string) interface{}
	GetBean(tableName string, id string) interface{}
	PutIds(tableName, sql string, ids interface{})
	PutBean(tableName string, id string, obj interface{})
	DelIds(tableName, sql string)
	DelBean(tableName string, id string)
	ClearIds(tableName string)
	ClearBeans(tableName string)
}

func encodeIds(ids []schemas.PK) (string, error) {
	buf := new(bytes.Buffer)
	enc := gob.NewEncoder(buf)
	err := enc.Encode(ids)

	return buf.String(), err
}

func decodeIds(s string) ([]schemas.PK, error) {
	pks := make([]schemas.PK, 0)

	dec := gob.NewDecoder(strings.NewReader(s))
	err := dec.Decode(&pks)

	return pks, err
}

// GetCacheSql returns cacher PKs via SQL
func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]schemas.PK, error) {
	bytes := m.GetIds(tableName, GenSqlKey(sql, args))
	if bytes == nil {
		return nil, errors.New("Not Exist")
	}
	return decodeIds(bytes.(string))
}

// PutCacheSql puts cacher SQL and PKs
func PutCacheSql(m Cacher, ids []schemas.PK, tableName, sql string, args interface{}) error {
	bytes, err := encodeIds(ids)
	if err != nil {
		return err
	}
	m.PutIds(tableName, GenSqlKey(sql, args), bytes)
	return nil
}

// GenSqlKey generates cache key
func GenSqlKey(sql string, args interface{}) string {
	return fmt.Sprintf("%v-%v", sql, args)
}
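The helpers above (GenSqlKey, PutCacheSql, GetCacheSql) gob-encode a slice of primary keys under a key derived from the SQL text and its arguments. A minimal, illustrative round trip, assuming the LRUCacher and MemoryStore added later in this commit and a made-up tasks query:

package main

import (
	"fmt"

	"xorm.io/xorm/caches"
	"xorm.io/xorm/schemas"
)

func main() {
	// Cacher backed by the in-memory store defined later in this commit.
	cacher := caches.NewLRUCacher(caches.NewMemoryStore(), 1000)

	sql := "SELECT id FROM tasks WHERE done = ?" // hypothetical query
	args := []interface{}{false}

	// Store the primary keys that the query returned.
	ids := []schemas.PK{{int64(1)}, {int64(2)}}
	if err := caches.PutCacheSql(cacher, ids, "tasks", sql, args); err != nil {
		panic(err)
	}

	// Fetch them back; GenSqlKey combines the SQL text and its arguments into the cache key.
	got, err := caches.GetCacheSql(cacher, "tasks", sql, args)
	if err != nil {
		panic(err)
	}
	fmt.Println(got) // the two stored PKs
}

The key format documented on CacheStore (<tablename>-p-<pk1>-...) is used for single beans, while GenSqlKey covers whole query results.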
vendor/xorm.io/xorm/caches/encode.go (generated, vendored; new file, 58 lines added)
@@ -0,0 +1,58 @@
// Copyright 2020 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package caches

import (
	"bytes"
	"crypto/md5"
	"encoding/gob"
	"encoding/json"
	"fmt"
	"io"
)

// md5 hash string
func Md5(str string) string {
	m := md5.New()
	io.WriteString(m, str)
	return fmt.Sprintf("%x", m.Sum(nil))
}
func Encode(data interface{}) ([]byte, error) {
	//return JsonEncode(data)
	return GobEncode(data)
}

func Decode(data []byte, to interface{}) error {
	//return JsonDecode(data, to)
	return GobDecode(data, to)
}

func GobEncode(data interface{}) ([]byte, error) {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	err := enc.Encode(&data)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func GobDecode(data []byte, to interface{}) error {
	buf := bytes.NewBuffer(data)
	dec := gob.NewDecoder(buf)
	return dec.Decode(to)
}

func JsonEncode(data interface{}) ([]byte, error) {
	val, err := json.Marshal(data)
	if err != nil {
		return nil, err
	}
	return val, nil
}

func JsonDecode(data []byte, to interface{}) error {
	return json.Unmarshal(data, to)
}
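Encode and Decode default to the gob path (the JSON variants are kept but commented out in the callers). A small sketch of the gob round trip; because the values travel through interface{}, gob needs the concrete type registered, so the example registers a hypothetical task struct first:

package main

import (
	"encoding/gob"
	"fmt"

	"xorm.io/xorm/caches"
)

type task struct {
	ID    int64
	Title string
}

func main() {
	// gob stores values behind interface{}, so concrete types must be registered.
	gob.Register(task{})

	raw, err := caches.GobEncode(task{ID: 1, Title: "write docs"})
	if err != nil {
		panic(err)
	}

	var got interface{}
	if err := caches.GobDecode(raw, &got); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", got.(task)) // round-tripped task value
}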
vendor/xorm.io/xorm/caches/leveldb.go (generated, vendored; new file, 94 lines added)
@@ -0,0 +1,94 @@
// Copyright 2020 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package caches

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

// LevelDBStore implements CacheStore provide local machine
type LevelDBStore struct {
	store *leveldb.DB
	Debug bool
	v     interface{}
}

var _ CacheStore = &LevelDBStore{}

func NewLevelDBStore(dbfile string) (*LevelDBStore, error) {
	db := &LevelDBStore{}
	h, err := leveldb.OpenFile(dbfile, nil)
	if err != nil {
		return nil, err
	}
	db.store = h
	return db, nil
}

func (s *LevelDBStore) Put(key string, value interface{}) error {
	val, err := Encode(value)
	if err != nil {
		if s.Debug {
			log.Println("[LevelDB]EncodeErr: ", err, "Key:", key)
		}
		return err
	}
	err = s.store.Put([]byte(key), val, nil)
	if err != nil {
		if s.Debug {
			log.Println("[LevelDB]PutErr: ", err, "Key:", key)
		}
		return err
	}
	if s.Debug {
		log.Println("[LevelDB]Put: ", key)
	}
	return err
}

func (s *LevelDBStore) Get(key string) (interface{}, error) {
	data, err := s.store.Get([]byte(key), nil)
	if err != nil {
		if s.Debug {
			log.Println("[LevelDB]GetErr: ", err, "Key:", key)
		}
		if err == leveldb.ErrNotFound {
			return nil, ErrNotExist
		}
		return nil, err
	}

	err = Decode(data, &s.v)
	if err != nil {
		if s.Debug {
			log.Println("[LevelDB]DecodeErr: ", err, "Key:", key)
		}
		return nil, err
	}
	if s.Debug {
		log.Println("[LevelDB]Get: ", key, s.v)
	}
	return s.v, err
}

func (s *LevelDBStore) Del(key string) error {
	err := s.store.Delete([]byte(key), nil)
	if err != nil {
		if s.Debug {
			log.Println("[LevelDB]DelErr: ", err, "Key:", key)
		}
		return err
	}
	if s.Debug {
		log.Println("[LevelDB]Del: ", key)
	}
	return err
}

func (s *LevelDBStore) Close() {
	s.store.Close()
}
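LevelDBStore persists cache entries on disk via goleveldb and satisfies the CacheStore interface from cache.go. A hedged usage sketch with a throwaway database path; string values are used because gob handles them without extra type registration:

package main

import (
	"fmt"

	"xorm.io/xorm/caches"
)

func main() {
	store, err := caches.NewLevelDBStore("/tmp/xorm-cache-leveldb") // placeholder path
	if err != nil {
		panic(err)
	}
	defer store.Close()
	store.Debug = true // log every Put/Get/Del

	// Key follows the documented format <tablename>-p-<pk>.
	if err := store.Put("tasks-p-1", "cached bean placeholder"); err != nil {
		panic(err)
	}

	v, err := store.Get("tasks-p-1")
	fmt.Println(v, err) // cached bean placeholder <nil>

	_ = store.Del("tasks-p-1")
	_, err = store.Get("tasks-p-1")
	fmt.Println(err == caches.ErrNotExist) // true: deleted keys report ErrNotExist
}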
vendor/xorm.io/xorm/caches/lru.go (generated, vendored; new file, 282 lines added)
@@ -0,0 +1,282 @@
// Copyright 2015 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package caches

import (
	"container/list"
	"fmt"
	"sync"
	"time"
)

// LRUCacher implments cache object facilities
type LRUCacher struct {
	idList         *list.List
	sqlList        *list.List
	idIndex        map[string]map[string]*list.Element
	sqlIndex       map[string]map[string]*list.Element
	store          CacheStore
	mutex          sync.Mutex
	MaxElementSize int
	Expired        time.Duration
	GcInterval     time.Duration
}

// NewLRUCacher creates a cacher
func NewLRUCacher(store CacheStore, maxElementSize int) *LRUCacher {
	return NewLRUCacher2(store, 3600*time.Second, maxElementSize)
}

// NewLRUCacher2 creates a cache include different params
func NewLRUCacher2(store CacheStore, expired time.Duration, maxElementSize int) *LRUCacher {
	cacher := &LRUCacher{store: store, idList: list.New(),
		sqlList: list.New(), Expired: expired,
		GcInterval: CacheGcInterval, MaxElementSize: maxElementSize,
		sqlIndex: make(map[string]map[string]*list.Element),
		idIndex:  make(map[string]map[string]*list.Element),
	}
	cacher.RunGC()
	return cacher
}

// RunGC run once every m.GcInterval
func (m *LRUCacher) RunGC() {
	time.AfterFunc(m.GcInterval, func() {
		m.RunGC()
		m.GC()
	})
}

// GC check ids lit and sql list to remove all element expired
func (m *LRUCacher) GC() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	var removedNum int
	for e := m.idList.Front(); e != nil; {
		if removedNum <= CacheGcMaxRemoved &&
			time.Now().Sub(e.Value.(*idNode).lastVisit) > m.Expired {
			removedNum++
			next := e.Next()
			node := e.Value.(*idNode)
			m.delBean(node.tbName, node.id)
			e = next
		} else {
			break
		}
	}

	removedNum = 0
	for e := m.sqlList.Front(); e != nil; {
		if removedNum <= CacheGcMaxRemoved &&
			time.Now().Sub(e.Value.(*sqlNode).lastVisit) > m.Expired {
			removedNum++
			next := e.Next()
			node := e.Value.(*sqlNode)
			m.delIds(node.tbName, node.sql)
			e = next
		} else {
			break
		}
	}
}

// GetIds returns all bean's ids according to sql and parameter from cache
func (m *LRUCacher) GetIds(tableName, sql string) interface{} {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if _, ok := m.sqlIndex[tableName]; !ok {
		m.sqlIndex[tableName] = make(map[string]*list.Element)
	}
	if v, err := m.store.Get(sql); err == nil {
		if el, ok := m.sqlIndex[tableName][sql]; !ok {
			el = m.sqlList.PushBack(newSQLNode(tableName, sql))
			m.sqlIndex[tableName][sql] = el
		} else {
			lastTime := el.Value.(*sqlNode).lastVisit
			// if expired, remove the node and return nil
			if time.Now().Sub(lastTime) > m.Expired {
				m.delIds(tableName, sql)
				return nil
			}
			m.sqlList.MoveToBack(el)
			el.Value.(*sqlNode).lastVisit = time.Now()
		}
		return v
	}

	m.delIds(tableName, sql)
	return nil
}

// GetBean returns bean according tableName and id from cache
func (m *LRUCacher) GetBean(tableName string, id string) interface{} {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if _, ok := m.idIndex[tableName]; !ok {
		m.idIndex[tableName] = make(map[string]*list.Element)
	}
	tid := genID(tableName, id)
	if v, err := m.store.Get(tid); err == nil {
		if el, ok := m.idIndex[tableName][id]; ok {
			lastTime := el.Value.(*idNode).lastVisit
			// if expired, remove the node and return nil
			if time.Now().Sub(lastTime) > m.Expired {
				m.delBean(tableName, id)
				return nil
			}
			m.idList.MoveToBack(el)
			el.Value.(*idNode).lastVisit = time.Now()
		} else {
			el = m.idList.PushBack(newIDNode(tableName, id))
			m.idIndex[tableName][id] = el
		}
		return v
	}

	// store bean is not exist, then remove memory's index
	m.delBean(tableName, id)
	return nil
}

// clearIds clears all sql-ids mapping on table tableName from cache
func (m *LRUCacher) clearIds(tableName string) {
	if tis, ok := m.sqlIndex[tableName]; ok {
		for sql, v := range tis {
			m.sqlList.Remove(v)
			m.store.Del(sql)
		}
	}
	m.sqlIndex[tableName] = make(map[string]*list.Element)
}

// ClearIds clears all sql-ids mapping on table tableName from cache
func (m *LRUCacher) ClearIds(tableName string) {
	m.mutex.Lock()
	m.clearIds(tableName)
	m.mutex.Unlock()
}

func (m *LRUCacher) clearBeans(tableName string) {
	if tis, ok := m.idIndex[tableName]; ok {
		for id, v := range tis {
			m.idList.Remove(v)
			tid := genID(tableName, id)
			m.store.Del(tid)
		}
	}
	m.idIndex[tableName] = make(map[string]*list.Element)
}

// ClearBeans clears all beans in some table
func (m *LRUCacher) ClearBeans(tableName string) {
	m.mutex.Lock()
	m.clearBeans(tableName)
	m.mutex.Unlock()
}

// PutIds pus ids into table
func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) {
	m.mutex.Lock()
	if _, ok := m.sqlIndex[tableName]; !ok {
		m.sqlIndex[tableName] = make(map[string]*list.Element)
	}
	if el, ok := m.sqlIndex[tableName][sql]; !ok {
		el = m.sqlList.PushBack(newSQLNode(tableName, sql))
		m.sqlIndex[tableName][sql] = el
	} else {
		el.Value.(*sqlNode).lastVisit = time.Now()
	}
	m.store.Put(sql, ids)
	if m.sqlList.Len() > m.MaxElementSize {
		e := m.sqlList.Front()
		node := e.Value.(*sqlNode)
		m.delIds(node.tbName, node.sql)
	}
	m.mutex.Unlock()
}

// PutBean puts beans into table
func (m *LRUCacher) PutBean(tableName string, id string, obj interface{}) {
	m.mutex.Lock()
	var el *list.Element
	var ok bool

	if el, ok = m.idIndex[tableName][id]; !ok {
		el = m.idList.PushBack(newIDNode(tableName, id))
		m.idIndex[tableName][id] = el
	} else {
		el.Value.(*idNode).lastVisit = time.Now()
	}

	m.store.Put(genID(tableName, id), obj)
	if m.idList.Len() > m.MaxElementSize {
		e := m.idList.Front()
		node := e.Value.(*idNode)
		m.delBean(node.tbName, node.id)
	}
	m.mutex.Unlock()
}

func (m *LRUCacher) delIds(tableName, sql string) {
	if _, ok := m.sqlIndex[tableName]; ok {
		if el, ok := m.sqlIndex[tableName][sql]; ok {
			delete(m.sqlIndex[tableName], sql)
			m.sqlList.Remove(el)
		}
	}
	m.store.Del(sql)
}

// DelIds deletes ids
func (m *LRUCacher) DelIds(tableName, sql string) {
	m.mutex.Lock()
	m.delIds(tableName, sql)
	m.mutex.Unlock()
}

func (m *LRUCacher) delBean(tableName string, id string) {
	tid := genID(tableName, id)
	if el, ok := m.idIndex[tableName][id]; ok {
		delete(m.idIndex[tableName], id)
		m.idList.Remove(el)
		m.clearIds(tableName)
	}
	m.store.Del(tid)
}

// DelBean deletes beans in some table
func (m *LRUCacher) DelBean(tableName string, id string) {
	m.mutex.Lock()
	m.delBean(tableName, id)
	m.mutex.Unlock()
}

type idNode struct {
	tbName    string
	id        string
	lastVisit time.Time
}

type sqlNode struct {
	tbName    string
	sql       string
	lastVisit time.Time
}

func genSQLKey(sql string, args interface{}) string {
	return fmt.Sprintf("%s-%v", sql, args)
}

func genID(prefix string, id string) string {
	return fmt.Sprintf("%s-%s", prefix, id)
}

func newIDNode(tbName string, id string) *idNode {
	return &idNode{tbName, id, time.Now()}
}

func newSQLNode(tbName, sql string) *sqlNode {
	return &sqlNode{tbName, sql, time.Now()}
}
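LRUCacher is the piece an application actually hands to xorm. A minimal sketch of enabling it on an engine, assuming a SQLite engine; the DSN, table name, and sizes are placeholders, and engine.SetDefaultCacher / engine.SetCacher are the xorm v1 entry points rather than anything specific to this commit:

package main

import (
	"time"

	_ "github.com/mattn/go-sqlite3" // placeholder driver import
	"xorm.io/xorm"
	"xorm.io/xorm/caches"
)

func main() {
	engine, err := xorm.NewEngine("sqlite3", "./vikunja.db") // placeholder DSN
	if err != nil {
		panic(err)
	}

	// Keep up to 1000 entries per list and expire them after 30 minutes.
	cacher := caches.NewLRUCacher2(caches.NewMemoryStore(), 30*time.Minute, 1000)

	// Cache every mapped table ...
	engine.SetDefaultCacher(cacher)

	// ... or only a specific one.
	engine.SetCacher("tasks", cacher)
}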
vendor/xorm.io/xorm/caches/manager.go (generated, vendored; new file, 56 lines added)
@@ -0,0 +1,56 @@
// Copyright 2020 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package caches

import "sync"

type Manager struct {
	cacher             Cacher
	disableGlobalCache bool

	cachers    map[string]Cacher
	cacherLock sync.RWMutex
}

func NewManager() *Manager {
	return &Manager{
		cachers: make(map[string]Cacher),
	}
}

// SetDisableGlobalCache disable global cache or not
func (mgr *Manager) SetDisableGlobalCache(disable bool) {
	if mgr.disableGlobalCache != disable {
		mgr.disableGlobalCache = disable
	}
}

func (mgr *Manager) SetCacher(tableName string, cacher Cacher) {
	mgr.cacherLock.Lock()
	mgr.cachers[tableName] = cacher
	mgr.cacherLock.Unlock()
}

func (mgr *Manager) GetCacher(tableName string) Cacher {
	var cacher Cacher
	var ok bool
	mgr.cacherLock.RLock()
	cacher, ok = mgr.cachers[tableName]
	mgr.cacherLock.RUnlock()
	if !ok && !mgr.disableGlobalCache {
		cacher = mgr.cacher
	}
	return cacher
}

// SetDefaultCacher set the default cacher. Xorm's default not enable cacher.
func (mgr *Manager) SetDefaultCacher(cacher Cacher) {
	mgr.cacher = cacher
}

// GetDefaultCacher returns the default cacher
func (mgr *Manager) GetDefaultCacher() Cacher {
	return mgr.cacher
}
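Manager is the internal registry the engine consults: a per-table cacher wins, and the default cacher is the fallback unless global caching is disabled. A small sketch using only the methods above, with made-up table names:

package main

import (
	"fmt"

	"xorm.io/xorm/caches"
)

func main() {
	mgr := caches.NewManager()

	defaultCacher := caches.NewLRUCacher(caches.NewMemoryStore(), 500)
	mgr.SetDefaultCacher(defaultCacher)

	tasksCacher := caches.NewLRUCacher(caches.NewMemoryStore(), 50)
	mgr.SetCacher("tasks", tasksCacher)

	fmt.Println(mgr.GetCacher("tasks") == tasksCacher)    // true: the per-table cacher wins
	fmt.Println(mgr.GetCacher("labels") == defaultCacher) // true: unknown tables fall back to the default
}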
vendor/xorm.io/xorm/caches/memory_store.go (generated, vendored; new file, 49 lines added)
@@ -0,0 +1,49 @@
// Copyright 2015 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package caches

import (
	"sync"
)

var _ CacheStore = NewMemoryStore()

// MemoryStore represents in-memory store
type MemoryStore struct {
	store map[interface{}]interface{}
	mutex sync.RWMutex
}

// NewMemoryStore creates a new store in memory
func NewMemoryStore() *MemoryStore {
	return &MemoryStore{store: make(map[interface{}]interface{})}
}

// Put puts object into store
func (s *MemoryStore) Put(key string, value interface{}) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.store[key] = value
	return nil
}

// Get gets object from store
func (s *MemoryStore) Get(key string) (interface{}, error) {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	if v, ok := s.store[key]; ok {
		return v, nil
	}

	return nil, ErrNotExist
}

// Del deletes object
func (s *MemoryStore) Del(key string) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	delete(s.store, key)
	return nil
}
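MemoryStore is the default CacheStore: a mutex-guarded map with no persistence and no size limit of its own (eviction is the LRUCacher's job). A tiny usage sketch with a made-up key:

package main

import (
	"fmt"

	"xorm.io/xorm/caches"
)

func main() {
	store := caches.NewMemoryStore()

	_ = store.Put("tasks-p-1", "row 1") // Put always returns nil
	v, err := store.Get("tasks-p-1")
	fmt.Println(v, err) // row 1 <nil>

	_ = store.Del("tasks-p-1")
	_, err = store.Get("tasks-p-1")
	fmt.Println(err == caches.ErrNotExist) // true: missing keys report ErrNotExist
}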