Migrated to dep instead of govendor

authored by kolaente on 2018-07-24 13:49:45 +02:00
committed by konrad
parent 0a2eae120e
commit 9c0c9474e8
92 changed files with 14968 additions and 14649 deletions

View File

@@ -1,11 +1,12 @@
package core
import (
"errors"
"fmt"
"time"
"bytes"
"encoding/gob"
"errors"
"fmt"
"strings"
"time"
)
const (
@@ -55,11 +56,10 @@ func encodeIds(ids []PK) (string, error) {
return buf.String(), err
}
func decodeIds(s string) ([]PK, error) {
pks := make([]PK, 0)
dec := gob.NewDecoder(bytes.NewBufferString(s))
dec := gob.NewDecoder(strings.NewReader(s))
err := dec.Decode(&pks)
return pks, err
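
For context, a minimal runnable sketch of the encode/decode round trip this hunk touches, using the strings.NewReader form the change moves to. The PK type below is a stand-in assumption for the package's own primary-key slice type, purely for illustration.

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"strings"
)

// PK stands in for the package's composite primary-key type
// (assumed here to be a slice of key values for illustration only).
type PK []interface{}

func encodeIds(ids []PK) (string, error) {
	buf := new(bytes.Buffer)
	err := gob.NewEncoder(buf).Encode(ids)
	return buf.String(), err
}

func decodeIds(s string) ([]PK, error) {
	pks := make([]PK, 0)
	// strings.NewReader reads the string directly instead of copying it
	// into a bytes.Buffer first, which is what the hunk above switches to.
	err := gob.NewDecoder(strings.NewReader(s)).Decode(&pks)
	return pks, err
}

func main() {
	s, _ := encodeIds([]PK{{"1"}, {"2"}})
	pks, _ := decodeIds(s)
	fmt.Println(pks) // [[1] [2]]
}
```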

View File

@@ -11,4 +11,5 @@ database:
test:
override:
# './...' is a relative pattern which means all subdirectories
- go test -v -race
- go test -v -race
- go test -v -race --dbtype=sqlite3
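
The new --dbtype flag only appears here as a CI argument; how the test suite consumes it is not part of this diff. A hedged sketch of the usual wiring with the standard flag package, with all names and defaults illustrative:

```go
package models_test

import (
	"flag"
	"os"
	"testing"
)

// dbType mirrors the --dbtype flag passed by CI above; defaulting to mysql
// here is an assumption for the sketch, not taken from the repository.
var dbType = flag.String("dbtype", "mysql", "database driver to run the tests against")

func TestMain(m *testing.M) {
	flag.Parse()
	// A real suite would open its test database based on *dbType, e.g. an
	// in-memory SQLite connection when --dbtype=sqlite3 is given.
	os.Exit(m.Run())
}
```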

View File

@@ -79,6 +79,10 @@ func (col *Column) String(d Dialect) string {
}
}
if col.Default != "" {
sql += "DEFAULT " + col.Default + " "
}
if d.ShowCreateNull() {
if col.Nullable {
sql += "NULL "
@@ -87,10 +91,6 @@ func (col *Column) String(d Dialect) string {
}
}
if col.Default != "" {
sql += "DEFAULT " + col.Default + " "
}
return sql
}
@@ -99,6 +99,10 @@ func (col *Column) StringNoPk(d Dialect) string {
sql += d.SqlType(col) + " "
if col.Default != "" {
sql += "DEFAULT " + col.Default + " "
}
if d.ShowCreateNull() {
if col.Nullable {
sql += "NULL "
@@ -107,10 +111,6 @@ func (col *Column) StringNoPk(d Dialect) string {
}
}
if col.Default != "" {
sql += "DEFAULT " + col.Default + " "
}
return sql
}
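
Both hunks reorder the generated column DDL so the DEFAULT clause is emitted before the NULL/NOT NULL clause. A small illustrative sketch of the resulting fragment; the hard-coded backtick quoting and names are placeholders, not the dialect-aware code above:

```go
package main

import "fmt"

// columnDDL mirrors the new ordering: type, then DEFAULT, then nullability.
func columnDDL(name, sqlType, def string, nullable bool) string {
	sql := "`" + name + "` " + sqlType + " "
	if def != "" {
		sql += "DEFAULT " + def + " "
	}
	if nullable {
		sql += "NULL "
	} else {
		sql += "NOT NULL "
	}
	return sql
}

func main() {
	fmt.Println(columnDDL("status", "INT", "0", false))
	// `status` INT DEFAULT 0 NOT NULL
}
```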

vendor/github.com/go-xorm/core/db.go (generated, vendored; 57 changes)
View File

@@ -7,6 +7,11 @@ import (
"fmt"
"reflect"
"regexp"
"sync"
)
var (
DefaultCacheSize = 200
)
func MapToSlice(query string, mp interface{}) (string, []interface{}, error) {
@@ -58,9 +63,16 @@ func StructToSlice(query string, st interface{}) (string, []interface{}, error)
return query, args, nil
}
type cacheStruct struct {
value reflect.Value
idx int
}
type DB struct {
*sql.DB
Mapper IMapper
Mapper IMapper
reflectCache map[reflect.Type]*cacheStruct
reflectCacheMutex sync.RWMutex
}
func Open(driverName, dataSourceName string) (*DB, error) {
@@ -68,11 +80,32 @@ func Open(driverName, dataSourceName string) (*DB, error) {
if err != nil {
return nil, err
}
return &DB{db, NewCacheMapper(&SnakeMapper{})}, nil
return &DB{
DB: db,
Mapper: NewCacheMapper(&SnakeMapper{}),
reflectCache: make(map[reflect.Type]*cacheStruct),
}, nil
}
func FromDB(db *sql.DB) *DB {
return &DB{db, NewCacheMapper(&SnakeMapper{})}
return &DB{
DB: db,
Mapper: NewCacheMapper(&SnakeMapper{}),
reflectCache: make(map[reflect.Type]*cacheStruct),
}
}
func (db *DB) reflectNew(typ reflect.Type) reflect.Value {
db.reflectCacheMutex.Lock()
defer db.reflectCacheMutex.Unlock()
cs, ok := db.reflectCache[typ]
if !ok || cs.idx+1 > DefaultCacheSize-1 {
cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), DefaultCacheSize, DefaultCacheSize), 0}
db.reflectCache[typ] = cs
} else {
cs.idx = cs.idx + 1
}
return cs.value.Index(cs.idx).Addr()
}
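
The reflect cache that used to be a package-level global in rows.go (removed further down in this commit) now lives on each DB value. Roughly, reflectNew amortises reflect.New calls by allocating a DefaultCacheSize-element slice per type once and handing out addresses of its elements. A standalone sketch of that trick, with illustrative names and without the mutex:

```go
package main

import (
	"fmt"
	"reflect"
)

const cacheSize = 200 // mirrors DefaultCacheSize above

type typeCache struct {
	value reflect.Value // preallocated slice of the cached type
	idx   int           // index of the last element handed out
}

// next returns a pointer (as a reflect.Value) to a fresh element of typ,
// carving it out of a slice allocated in one shot instead of calling
// reflect.New once per destination.
func next(cache map[reflect.Type]*typeCache, typ reflect.Type) reflect.Value {
	cs, ok := cache[typ]
	if !ok || cs.idx+1 > cacheSize-1 {
		cs = &typeCache{reflect.MakeSlice(reflect.SliceOf(typ), cacheSize, cacheSize), 0}
		cache[typ] = cs
	} else {
		cs.idx++
	}
	return cs.value.Index(cs.idx).Addr()
}

func main() {
	cache := map[reflect.Type]*typeCache{}
	p := next(cache, reflect.TypeOf(int64(0))).Interface().(*int64)
	*p = 42
	fmt.Println(*p) // 42
}
```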
func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
@@ -83,7 +116,7 @@ func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
}
return nil, err
}
return &Rows{rows, db.Mapper}, nil
return &Rows{rows, db}, nil
}
func (db *DB) QueryMap(query string, mp interface{}) (*Rows, error) {
@@ -128,8 +161,8 @@ func (db *DB) QueryRowStruct(query string, st interface{}) *Row {
type Stmt struct {
*sql.Stmt
Mapper IMapper
names map[string]int
db *DB
names map[string]int
}
func (db *DB) Prepare(query string) (*Stmt, error) {
@@ -145,7 +178,7 @@ func (db *DB) Prepare(query string) (*Stmt, error) {
if err != nil {
return nil, err
}
return &Stmt{stmt, db.Mapper, names}, nil
return &Stmt{stmt, db, names}, nil
}
func (s *Stmt) ExecMap(mp interface{}) (sql.Result, error) {
@@ -179,7 +212,7 @@ func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
if err != nil {
return nil, err
}
return &Rows{rows, s.Mapper}, nil
return &Rows{rows, s.db}, nil
}
func (s *Stmt) QueryMap(mp interface{}) (*Rows, error) {
@@ -274,7 +307,7 @@ func (EmptyScanner) Scan(src interface{}) error {
type Tx struct {
*sql.Tx
Mapper IMapper
db *DB
}
func (db *DB) Begin() (*Tx, error) {
@@ -282,7 +315,7 @@ func (db *DB) Begin() (*Tx, error) {
if err != nil {
return nil, err
}
return &Tx{tx, db.Mapper}, nil
return &Tx{tx, db}, nil
}
func (tx *Tx) Prepare(query string) (*Stmt, error) {
@@ -298,7 +331,7 @@ func (tx *Tx) Prepare(query string) (*Stmt, error) {
if err != nil {
return nil, err
}
return &Stmt{stmt, tx.Mapper, names}, nil
return &Stmt{stmt, tx.db, names}, nil
}
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
@@ -327,7 +360,7 @@ func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
if err != nil {
return nil, err
}
return &Rows{rows, tx.Mapper}, nil
return &Rows{rows, tx.db}, nil
}
func (tx *Tx) QueryMap(query string, mp interface{}) (*Rows, error) {

View File

@@ -74,6 +74,7 @@ type Dialect interface {
GetIndexes(tableName string) (map[string]*Index, error)
Filters() []Filter
SetParams(params map[string]string)
}
func OpenDialect(dialect Dialect) (*DB, error) {
@@ -148,7 +149,8 @@ func (db *Base) SupportDropIfExists() bool {
}
func (db *Base) DropTableSql(tableName string) string {
return fmt.Sprintf("DROP TABLE IF EXISTS `%s`", tableName)
quote := db.dialect.Quote
return fmt.Sprintf("DROP TABLE IF EXISTS %s", quote(tableName))
}
func (db *Base) HasRecords(query string, args ...interface{}) (bool, error) {
@@ -289,6 +291,9 @@ func (b *Base) LogSQL(sql string, args []interface{}) {
}
}
func (b *Base) SetParams(params map[string]string) {
}
var (
dialects = map[string]func() Dialect{}
)
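
DropTableSql now quotes the table name through the dialect instead of hard-coding MySQL backticks. A small illustrative sketch of why that matters, using an assumed PostgreSQL-style double-quote quoter in place of a real dialect:

```go
package main

import "fmt"

func main() {
	// Illustrative double-quote quoter, standing in for a dialect's Quote.
	quote := func(name string) string { return `"` + name + `"` }
	fmt.Printf("DROP TABLE IF EXISTS %s\n", quote("user"))
	// DROP TABLE IF EXISTS "user"
}
```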

View File

@@ -37,9 +37,9 @@ func (q *Quoter) Quote(content string) string {
func (i *IdFilter) Do(sql string, dialect Dialect, table *Table) string {
quoter := NewQuoter(dialect)
if table != nil && len(table.PrimaryKeys) == 1 {
sql = strings.Replace(sql, "`(id)`", quoter.Quote(table.PrimaryKeys[0]), -1)
sql = strings.Replace(sql, quoter.Quote("(id)"), quoter.Quote(table.PrimaryKeys[0]), -1)
return strings.Replace(sql, "(id)", quoter.Quote(table.PrimaryKeys[0]), -1)
sql = strings.Replace(sql, " `(id)` ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1)
sql = strings.Replace(sql, " "+quoter.Quote("(id)")+" ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1)
return strings.Replace(sql, " (id) ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1)
}
return sql
}
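
The filter now only replaces the (id) placeholder when it is surrounded by spaces, so it cannot touch longer identifiers that merely contain those characters. A tiny hedged sketch of the substitution with made-up table and key names:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// `uid` stands in for the table's single quoted primary key.
	sql := "SELECT * FROM `user` WHERE (id) = ?"
	pk := "`uid`"
	fmt.Println(strings.Replace(sql, " (id) ", " "+pk+" ", -1))
	// SELECT * FROM `user` WHERE `uid` = ?
}
```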

View File

@@ -22,6 +22,8 @@ type Index struct {
func (index *Index) XName(tableName string) string {
if !strings.HasPrefix(index.Name, "UQE_") &&
!strings.HasPrefix(index.Name, "IDX_") {
tableName = strings.Replace(tableName, `"`, "", -1)
tableName = strings.Replace(tableName, `.`, "_", -1)
if index.Type == UniqueType {
return fmt.Sprintf("UQE_%v_%v", tableName, index.Name)
}
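
The two added lines make generated index names safe for quoted or schema-qualified table names. A short hedged sketch of the effect, with an illustrative PostgreSQL-style name:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	tableName := `"public"."users"` // illustrative schema-qualified name
	tableName = strings.Replace(tableName, `"`, "", -1)
	tableName = strings.Replace(tableName, `.`, "_", -1)
	fmt.Println("UQE_" + tableName + "_email") // UQE_public_users_email
}
```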

View File

@@ -9,7 +9,7 @@ import (
type Rows struct {
*sql.Rows
Mapper IMapper
db *DB
}
func (rs *Rows) ToMapString() ([]map[string]string, error) {
@@ -105,7 +105,7 @@ func (rs *Rows) ScanStructByName(dest interface{}) error {
newDest := make([]interface{}, len(cols))
var v EmptyScanner
for j, name := range cols {
f := fieldByName(vv.Elem(), rs.Mapper.Table2Obj(name))
f := fieldByName(vv.Elem(), rs.db.Mapper.Table2Obj(name))
if f.IsValid() {
newDest[j] = f.Addr().Interface()
} else {
@@ -116,36 +116,6 @@ func (rs *Rows) ScanStructByName(dest interface{}) error {
return rs.Rows.Scan(newDest...)
}
type cacheStruct struct {
value reflect.Value
idx int
}
var (
reflectCache = make(map[reflect.Type]*cacheStruct)
reflectCacheMutex sync.RWMutex
)
func ReflectNew(typ reflect.Type) reflect.Value {
reflectCacheMutex.RLock()
cs, ok := reflectCache[typ]
reflectCacheMutex.RUnlock()
const newSize = 200
if !ok || cs.idx+1 > newSize-1 {
cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), newSize, newSize), 0}
reflectCacheMutex.Lock()
reflectCache[typ] = cs
reflectCacheMutex.Unlock()
} else {
reflectCacheMutex.Lock()
cs.idx = cs.idx + 1
reflectCacheMutex.Unlock()
}
return cs.value.Index(cs.idx).Addr()
}
// scan data to a slice's pointer, slice's length should equal to columns' number
func (rs *Rows) ScanSlice(dest interface{}) error {
vv := reflect.ValueOf(dest)
@@ -197,9 +167,7 @@ func (rs *Rows) ScanMap(dest interface{}) error {
vvv := vv.Elem()
for i, _ := range cols {
newDest[i] = ReflectNew(vvv.Type().Elem()).Interface()
//v := reflect.New(vvv.Type().Elem())
//newDest[i] = v.Interface()
newDest[i] = rs.db.reflectNew(vvv.Type().Elem()).Interface()
}
err = rs.Rows.Scan(newDest...)
@@ -215,32 +183,6 @@ func (rs *Rows) ScanMap(dest interface{}) error {
return nil
}
/*func (rs *Rows) ScanMap(dest interface{}) error {
vv := reflect.ValueOf(dest)
if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
return errors.New("dest should be a map's pointer")
}
cols, err := rs.Columns()
if err != nil {
return err
}
newDest := make([]interface{}, len(cols))
err = rs.ScanSlice(newDest)
if err != nil {
return err
}
vvv := vv.Elem()
for i, name := range cols {
vname := reflect.ValueOf(name)
vvv.SetMapIndex(vname, reflect.ValueOf(newDest[i]).Elem())
}
return nil
}*/
type Row struct {
rows *Rows
// One of these two will be non-nil:

View File

@@ -44,6 +44,9 @@ func convertTime(dest *NullTime, src interface{}) error {
}
*dest = NullTime(t)
return nil
case time.Time:
*dest = NullTime(s)
return nil
case nil:
default:
return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest)
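
The new case handles drivers that already hand back a time.Time. A minimal sketch under the assumption, consistent with the hunk above, that NullTime is a defined type over time.Time:

```go
package main

import (
	"fmt"
	"time"
)

type NullTime time.Time // assumed shape of the package's NullTime

func convertTime(dest *NullTime, src interface{}) error {
	switch s := src.(type) {
	case time.Time:
		// the added branch: wrap the driver's time.Time directly
		*dest = NullTime(s)
		return nil
	case nil:
		return nil
	default:
		return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest)
	}
}

func main() {
	var nt NullTime
	fmt.Println(convertTime(&nt, time.Now())) // <nil>
}
```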

View File

@@ -69,15 +69,17 @@ var (
Enum = "ENUM"
Set = "SET"
Char = "CHAR"
Varchar = "VARCHAR"
NVarchar = "NVARCHAR"
TinyText = "TINYTEXT"
Text = "TEXT"
Clob = "CLOB"
MediumText = "MEDIUMTEXT"
LongText = "LONGTEXT"
Uuid = "UUID"
Char = "CHAR"
Varchar = "VARCHAR"
NVarchar = "NVARCHAR"
TinyText = "TINYTEXT"
Text = "TEXT"
Clob = "CLOB"
MediumText = "MEDIUMTEXT"
LongText = "LONGTEXT"
Uuid = "UUID"
UniqueIdentifier = "UNIQUEIDENTIFIER"
SysName = "SYSNAME"
Date = "DATE"
DateTime = "DATETIME"
@@ -132,6 +134,7 @@ var (
LongText: TEXT_TYPE,
Uuid: TEXT_TYPE,
Clob: TEXT_TYPE,
SysName: TEXT_TYPE,
Date: TIME_TYPE,
DateTime: TIME_TYPE,
@@ -148,11 +151,12 @@ var (
Binary: BLOB_TYPE,
VarBinary: BLOB_TYPE,
TinyBlob: BLOB_TYPE,
Blob: BLOB_TYPE,
MediumBlob: BLOB_TYPE,
LongBlob: BLOB_TYPE,
Bytea: BLOB_TYPE,
TinyBlob: BLOB_TYPE,
Blob: BLOB_TYPE,
MediumBlob: BLOB_TYPE,
LongBlob: BLOB_TYPE,
Bytea: BLOB_TYPE,
UniqueIdentifier: BLOB_TYPE,
Bool: NUMERIC_TYPE,
@@ -289,9 +293,9 @@ func SQLType2Type(st SQLType) reflect.Type {
return reflect.TypeOf(float32(1))
case Double:
return reflect.TypeOf(float64(1))
case Char, Varchar, NVarchar, TinyText, Text, MediumText, LongText, Enum, Set, Uuid, Clob:
case Char, Varchar, NVarchar, TinyText, Text, MediumText, LongText, Enum, Set, Uuid, Clob, SysName:
return reflect.TypeOf("")
case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary:
case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary, UniqueIdentifier:
return reflect.TypeOf([]byte{})
case Bool:
return reflect.TypeOf(true)
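
A rough sketch of what the two additions mean for callers, written against the vendored package's exported API as shown in this diff (the struct literal shape is an assumption):

```go
package main

import (
	"fmt"

	"github.com/go-xorm/core"
)

func main() {
	// SQL Server's SYSNAME now maps to a Go string, UNIQUEIDENTIFIER to bytes.
	fmt.Println(core.SQLType2Type(core.SQLType{Name: core.SysName}))          // string
	fmt.Println(core.SQLType2Type(core.SQLType{Name: core.UniqueIdentifier})) // []uint8
}
```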

View File

@@ -1,28 +0,0 @@
Copyright (c) 2014, go-xorm
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,9 +0,0 @@
xorm-redis-cache
================
XORM Redis Cache
[![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/go-xorm/xorm-redis-cache)

View File

@@ -1,302 +0,0 @@
package xormrediscache
import (
"bytes"
"encoding/gob"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/go-xorm/core"
"hash/crc32"
// "log"
"reflect"
// "strconv"
"time"
"unsafe"
)
const (
DEFAULT_EXPIRATION = time.Duration(0)
FOREVER_EXPIRATION = time.Duration(-1)
LOGGING_PREFIX = "[redis_cacher]"
)
// Wraps the Redis client to meet the Cache interface.
type RedisCacher struct {
pool *redis.Pool
defaultExpiration time.Duration
Logger core.ILogger
}
// New a Redis Cacher, host as IP endpoint, i.e., localhost:6379, provide empty string or nil if Redis server doesn't
// require AUTH command, defaultExpiration sets the expire duration for a key to live. Until redigo supports
// sharding/clustering, only one host will be in hostList
//
// engine.SetDefaultCacher(xormrediscache.NewRedisCacher("localhost:6379", "", xormrediscache.DEFAULT_EXPIRATION, engine.Logger))
//
// or set MapCacher
//
// engine.MapCacher(&user, xormrediscache.NewRedisCacher("localhost:6379", "", xormrediscache.DEFAULT_EXPIRATION, engine.Logger))
//
func NewRedisCacher(host string, password string, defaultExpiration time.Duration, logger core.ILogger) *RedisCacher {
var pool = &redis.Pool{
MaxIdle: 5,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
// the redis protocol should probably be made sett-able
c, err := redis.Dial("tcp", host)
if err != nil {
return nil, err
}
if len(password) > 0 {
if _, err := c.Do("AUTH", password); err != nil {
c.Close()
return nil, err
}
} else {
// check with PING
if _, err := c.Do("PING"); err != nil {
c.Close()
return nil, err
}
}
return c, err
},
// custom connection test method
TestOnBorrow: func(c redis.Conn, t time.Time) error {
if _, err := c.Do("PING"); err != nil {
return err
}
return nil
},
}
return &RedisCacher{pool: pool, defaultExpiration: defaultExpiration, Logger: logger}
}
func exists(conn redis.Conn, key string) bool {
existed, _ := redis.Bool(conn.Do("EXISTS", key))
return existed
}
func (c *RedisCacher) logErrf(format string, contents ...interface{}) {
if c.Logger != nil {
c.Logger.Errorf(fmt.Sprintf("%s %s", LOGGING_PREFIX, format), contents...)
}
}
func (c *RedisCacher) logDebugf(format string, contents ...interface{}) {
if c.Logger != nil {
c.Logger.Debugf(fmt.Sprintf("%s %s", LOGGING_PREFIX, format), contents...)
}
}
func (c *RedisCacher) getBeanKey(tableName string, id string) string {
return fmt.Sprintf("xorm:bean:%s:%s", tableName, id)
}
func (c *RedisCacher) getSqlKey(tableName string, sql string) string {
// hash sql to minimize key length
crc := crc32.ChecksumIEEE([]byte(sql))
return fmt.Sprintf("xorm:sql:%s:%d", tableName, crc)
}
// Delete all xorm cached objects
func (c *RedisCacher) Flush() error {
// conn := c.pool.Get()
// defer conn.Close()
// _, err := conn.Do("FLUSHALL")
// return err
return c.delObject("xorm:*")
}
func (c *RedisCacher) getObject(key string) interface{} {
conn := c.pool.Get()
defer conn.Close()
raw, err := conn.Do("GET", key)
if raw == nil {
return nil
}
item, err := redis.Bytes(raw, err)
if err != nil {
c.logErrf("redis.Bytes failed: %s", err)
return nil
}
value, err := c.deserialize(item)
return value
}
func (c *RedisCacher) GetIds(tableName, sql string) interface{} {
sqlKey := c.getSqlKey(tableName, sql)
c.logDebugf(" GetIds|tableName:%s|sql:%s|key:%s", tableName, sql, sqlKey)
return c.getObject(sqlKey)
}
func (c *RedisCacher) GetBean(tableName string, id string) interface{} {
beanKey := c.getBeanKey(tableName, id)
c.logDebugf("[xorm/redis_cacher] GetBean|tableName:%s|id:%s|key:%s", tableName, id, beanKey)
return c.getObject(beanKey)
}
func (c *RedisCacher) putObject(key string, value interface{}) {
c.invoke(c.pool.Get().Do, key, value, c.defaultExpiration)
}
func (c *RedisCacher) PutIds(tableName, sql string, ids interface{}) {
sqlKey := c.getSqlKey(tableName, sql)
c.logDebugf("PutIds|tableName:%s|sql:%s|key:%s|obj:%s|type:%v", tableName, sql, sqlKey, ids, reflect.TypeOf(ids))
c.putObject(sqlKey, ids)
}
func (c *RedisCacher) PutBean(tableName string, id string, obj interface{}) {
beanKey := c.getBeanKey(tableName, id)
c.logDebugf("PutBean|tableName:%s|id:%s|key:%s|type:%v", tableName, id, beanKey, reflect.TypeOf(obj))
c.putObject(beanKey, obj)
}
func (c *RedisCacher) delObject(key string) error {
c.logDebugf("delObject key:[%s]", key)
conn := c.pool.Get()
defer conn.Close()
if !exists(conn, key) {
c.logErrf("delObject key:[%s] err: %v", key, core.ErrCacheMiss)
return core.ErrCacheMiss
}
_, err := conn.Do("DEL", key)
return err
}
func (c *RedisCacher) delObjects(key string) error {
c.logDebugf("delObjects key:[%s]", key)
conn := c.pool.Get()
defer conn.Close()
keys, err := conn.Do("KEYS", key)
c.logDebugf("delObjects keys: %v", keys)
if err == nil {
for _, key := range keys.([]interface{}) {
conn.Do("DEL", key)
}
}
return err
}
func (c *RedisCacher) DelIds(tableName, sql string) {
c.delObject(c.getSqlKey(tableName, sql))
}
func (c *RedisCacher) DelBean(tableName string, id string) {
c.delObject(c.getBeanKey(tableName, id))
}
func (c *RedisCacher) ClearIds(tableName string) {
c.delObjects(c.getSqlKey(tableName, "*"))
}
func (c *RedisCacher) ClearBeans(tableName string) {
c.delObjects(c.getBeanKey(tableName, "*"))
}
func (c *RedisCacher) invoke(f func(string, ...interface{}) (interface{}, error),
key string, value interface{}, expires time.Duration) error {
switch expires {
case DEFAULT_EXPIRATION:
expires = c.defaultExpiration
case FOREVER_EXPIRATION:
expires = time.Duration(0)
}
b, err := c.serialize(value)
if err != nil {
return err
}
conn := c.pool.Get()
defer conn.Close()
if expires > 0 {
_, err := f("SETEX", key, int32(expires/time.Second), b)
return err
} else {
_, err := f("SET", key, b)
return err
}
}
func (c *RedisCacher) serialize(value interface{}) ([]byte, error) {
err := c.registerGobConcreteType(value)
if err != nil {
return nil, err
}
if reflect.TypeOf(value).Kind() == reflect.Struct {
return nil, fmt.Errorf("serialize func only take pointer of a struct")
}
var b bytes.Buffer
encoder := gob.NewEncoder(&b)
c.logDebugf("serialize type:%v", reflect.TypeOf(value))
err = encoder.Encode(&value)
if err != nil {
c.logErrf("gob encoding '%s' failed: %s|value:%v", value, err, value)
return nil, err
}
return b.Bytes(), nil
}
func (c *RedisCacher) deserialize(byt []byte) (ptr interface{}, err error) {
b := bytes.NewBuffer(byt)
decoder := gob.NewDecoder(b)
var p interface{}
err = decoder.Decode(&p)
if err != nil {
c.logErrf("decode failed: %v", err)
return
}
v := reflect.ValueOf(p)
c.logDebugf("deserialize type:%v", v.Type())
if v.Kind() == reflect.Struct {
var pp interface{} = &p
datas := reflect.ValueOf(pp).Elem().InterfaceData()
sp := reflect.NewAt(v.Type(),
unsafe.Pointer(datas[1])).Interface()
ptr = sp
vv := reflect.ValueOf(ptr)
c.logDebugf("deserialize convert ptr type:%v | CanAddr:%t", vv.Type(), vv.CanAddr())
} else {
ptr = p
}
return
}
func (c *RedisCacher) registerGobConcreteType(value interface{}) error {
t := reflect.TypeOf(value)
c.logDebugf("registerGobConcreteType:%v", t)
switch t.Kind() {
case reflect.Ptr:
v := reflect.ValueOf(value)
i := v.Elem().Interface()
gob.Register(i)
case reflect.Struct, reflect.Map, reflect.Slice:
gob.Register(value)
case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
// do nothing since already registered known type
default:
return fmt.Errorf("unhandled type: %v", t)
}
return nil
}

View File

@@ -1,6 +0,0 @@
redis-cli FLUSHALL
if [ $? == "0" ];then
go test -v -run=TestMysqlWithCache
else
echo "no redis-server running on localhost"
fi