Update xorm to v1 (#323)
Fix limit for databases other than sqlite go mod tidy && go mod vendor Remove unneeded break statements Make everything work with the new xorm version Fix xorm logging Fix lint Fix redis init Fix using id field Fix database init for testing Change default database log level Add xorm logger Use const for postgres go mod tidy Merge branch 'master' into update/xorm # Conflicts: # go.mod # go.sum # vendor/modules.txt go mod vendor Fix loading fixtures for postgres Go mod vendor1 Update xorm to version 1 Co-authored-by: kolaente <k@knt.li> Reviewed-on: https://kolaente.dev/vikunja/api/pulls/323
This commit is contained in:
1139
vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
generated
vendored
Normal file
1139
vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
177
vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
generated
vendored
Normal file
177
vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
generated
vendored
Normal file
@ -0,0 +1,177 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package table allows read and write sorted key/value.
|
||||
package table
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
/*
|
||||
Table:
|
||||
|
||||
Table is consist of one or more data blocks, an optional filter block
|
||||
a metaindex block, an index block and a table footer. Metaindex block
|
||||
is a special block used to keep parameters of the table, such as filter
|
||||
block name and its block handle. Index block is a special block used to
|
||||
keep record of data blocks offset and length, index block use one as
|
||||
restart interval. The key used by index block are the last key of preceding
|
||||
block, shorter separator of adjacent blocks or shorter successor of the
|
||||
last key of the last block. Filter block is an optional block contains
|
||||
sequence of filter data generated by a filter generator.
|
||||
|
||||
Table data structure:
|
||||
+ optional
|
||||
/
|
||||
+--------------+--------------+--------------+------+-------+-----------------+-------------+--------+
|
||||
| data block 1 | ... | data block n | filter block | metaindex block | index block | footer |
|
||||
+--------------+--------------+--------------+--------------+-----------------+-------------+--------+
|
||||
|
||||
Each block followed by a 5-bytes trailer contains compression type and checksum.
|
||||
|
||||
Table block trailer:
|
||||
|
||||
+---------------------------+-------------------+
|
||||
| compression type (1-byte) | checksum (4-byte) |
|
||||
+---------------------------+-------------------+
|
||||
|
||||
The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression
|
||||
type also included in the checksum.
|
||||
|
||||
Table footer:
|
||||
|
||||
+------------------- 40-bytes -------------------+
|
||||
/ \
|
||||
+------------------------+--------------------+------+-----------------+
|
||||
| metaindex block handle / index block handle / ---- | magic (8-bytes) |
|
||||
+------------------------+--------------------+------+-----------------+
|
||||
|
||||
The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/".
|
||||
|
||||
NOTE: All fixed-length integer are little-endian.
|
||||
*/
|
||||
|
||||
/*
|
||||
Block:
|
||||
|
||||
Block is consist of one or more key/value entries and a block trailer.
|
||||
Block entry shares key prefix with its preceding key until a restart
|
||||
point reached. A block should contains at least one restart point.
|
||||
First restart point are always zero.
|
||||
|
||||
Block data structure:
|
||||
|
||||
+ restart point + restart point (depends on restart interval)
|
||||
/ /
|
||||
+---------------+---------------+---------------+---------------+---------+
|
||||
| block entry 1 | block entry 2 | ... | block entry n | trailer |
|
||||
+---------------+---------------+---------------+---------------+---------+
|
||||
|
||||
Key/value entry:
|
||||
|
||||
+---- key len ----+
|
||||
/ \
|
||||
+-------+---------+-----------+---------+--------------------+--------------+----------------+
|
||||
| shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) |
|
||||
+-----------------+---------------------+--------------------+--------------+----------------+
|
||||
|
||||
Block entry shares key prefix with its preceding key:
|
||||
Conditions:
|
||||
restart_interval=2
|
||||
entry one : key=deck,value=v1
|
||||
entry two : key=dock,value=v2
|
||||
entry three: key=duck,value=v3
|
||||
The entries will be encoded as follow:
|
||||
|
||||
+ restart point (offset=0) + restart point (offset=16)
|
||||
/ /
|
||||
+-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
|
||||
| 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" |
|
||||
+-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
|
||||
\ / \ / \ /
|
||||
+----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+
|
||||
|
||||
The block trailer will contains two restart points:
|
||||
|
||||
+------------+-----------+--------+
|
||||
| 0 | 16 | 2 |
|
||||
+------------+-----------+---+----+
|
||||
\ / \
|
||||
+-- restart points --+ + restart points length
|
||||
|
||||
Block trailer:
|
||||
|
||||
+-- 4-bytes --+
|
||||
/ \
|
||||
+-----------------+-----------------+-----------------+------------------------------+
|
||||
| restart point 1 | .... | restart point n | restart points len (4-bytes) |
|
||||
+-----------------+-----------------+-----------------+------------------------------+
|
||||
|
||||
|
||||
NOTE: All fixed-length integer are little-endian.
|
||||
*/
|
||||
|
||||
/*
|
||||
Filter block:
|
||||
|
||||
Filter block consist of one or more filter data and a filter block trailer.
|
||||
The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg.
|
||||
|
||||
Filter block data structure:
|
||||
|
||||
+ offset 1 + offset 2 + offset n + trailer offset
|
||||
/ / / /
|
||||
+---------------+---------------+---------------+---------+
|
||||
| filter data 1 | ... | filter data n | trailer |
|
||||
+---------------+---------------+---------------+---------+
|
||||
|
||||
Filter block trailer:
|
||||
|
||||
+- 4-bytes -+
|
||||
/ \
|
||||
+---------------+---------------+---------------+-------------------------------+------------------+
|
||||
| data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
|
||||
+-------------- +---------------+---------------+-------------------------------+------------------+
|
||||
|
||||
|
||||
NOTE: All fixed-length integer are little-endian.
|
||||
*/
|
||||
|
||||
// File-format constants. All of these are part of the on-disk table
// format and must never be changed.
const (
	// Each block is followed by a 5-byte trailer: a 1-byte compression
	// type and a 4-byte CRC-32C checksum.
	blockTrailerLen = 5
	// The footer holds the metaindex and index block handles, padding,
	// and the 8-byte magic number.
	footerLen = 48

	// First 64 bits of the SHA-1 of "http://code.google.com/p/leveldb/".
	magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"

	// The block type gives the per-block compression format.
	// These constants are part of the file format and should not be changed.
	blockTypeNoCompression     = 0
	blockTypeSnappyCompression = 1

	// Generate new filter every 2KB of data
	filterBaseLg = 11
	filterBase   = 1 << filterBaseLg
)
|
||||
|
||||
type blockHandle struct {
|
||||
offset, length uint64
|
||||
}
|
||||
|
||||
func decodeBlockHandle(src []byte) (blockHandle, int) {
|
||||
offset, n := binary.Uvarint(src)
|
||||
length, m := binary.Uvarint(src[n:])
|
||||
if n == 0 || m == 0 {
|
||||
return blockHandle{}, 0
|
||||
}
|
||||
return blockHandle{offset, length}, n + m
|
||||
}
|
||||
|
||||
func encodeBlockHandle(dst []byte, b blockHandle) int {
|
||||
n := binary.PutUvarint(dst, b.offset)
|
||||
m := binary.PutUvarint(dst[n:], b.length)
|
||||
return n + m
|
||||
}
|
375
vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
generated
vendored
Normal file
375
vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
generated
vendored
Normal file
@ -0,0 +1,375 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// sharedPrefixLen returns the length of the longest common prefix of a
// and b.
func sharedPrefixLen(a, b []byte) int {
	limit := len(a)
	if len(b) < limit {
		limit = len(b)
	}
	i := 0
	for i < limit && a[i] == b[i] {
		i++
	}
	return i
}
|
||||
|
||||
// blockWriter assembles a single table block: a sequence of
// prefix-compressed key/value entries followed by a restart-point
// trailer (see the format description in table.go).
type blockWriter struct {
	restartInterval int         // number of entries between restart points
	buf             util.Buffer // accumulates the encoded entries
	nEntries        int         // entries appended so far
	prevKey         []byte      // last appended key, used for prefix compression
	restarts        []uint32    // offsets of restart points within buf
	scratch         []byte      // scratch space for uvarint encoding
}
|
||||
|
||||
// append adds one key/value entry to the block. Each entry shares a key
// prefix with the preceding key, except at restart points where the
// full key is stored and the restart offset is recorded.
func (w *blockWriter) append(key, value []byte) {
	nShared := 0
	if w.nEntries%w.restartInterval == 0 {
		// Start a new restart point: record its offset and store the full key.
		w.restarts = append(w.restarts, uint32(w.buf.Len()))
	} else {
		nShared = sharedPrefixLen(w.prevKey, key)
	}
	// Entry header: shared key len, unshared key len, value len — all uvarint.
	n := binary.PutUvarint(w.scratch[0:], uint64(nShared))
	n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared))
	n += binary.PutUvarint(w.scratch[n:], uint64(len(value)))
	w.buf.Write(w.scratch[:n])
	// Only the unshared suffix of the key is stored.
	w.buf.Write(key[nShared:])
	w.buf.Write(value)
	// Remember the key (copied, so callers may reuse their slice) for
	// prefix compression of the next entry.
	w.prevKey = append(w.prevKey[:0], key...)
	w.nEntries++
}
|
||||
|
||||
// finish completes the block by appending the restart-point table and
// its length as the block trailer (little-endian uint32 each).
func (w *blockWriter) finish() {
	// Write restarts entry.
	if w.nEntries == 0 {
		// Must have at least one restart entry.
		w.restarts = append(w.restarts, 0)
	}
	// The final 4-byte value is the number of restart points.
	w.restarts = append(w.restarts, uint32(len(w.restarts)))
	for _, x := range w.restarts {
		buf4 := w.buf.Alloc(4)
		binary.LittleEndian.PutUint32(buf4, x)
	}
}
|
||||
|
||||
func (w *blockWriter) reset() {
|
||||
w.buf.Reset()
|
||||
w.nEntries = 0
|
||||
w.restarts = w.restarts[:0]
|
||||
}
|
||||
|
||||
func (w *blockWriter) bytesLen() int {
|
||||
restartsLen := len(w.restarts)
|
||||
if restartsLen == 0 {
|
||||
restartsLen = 1
|
||||
}
|
||||
return w.buf.Len() + 4*restartsLen + 4
|
||||
}
|
||||
|
||||
// filterWriter assembles the optional filter block: one filter datum
// per filterBase bytes of table data, followed by an offsets trailer.
type filterWriter struct {
	generator filter.FilterGenerator // nil when the table has no filter
	buf       util.Buffer            // accumulated filter data
	nKeys     int                    // keys added since the last generate
	offsets   []uint32               // start offset of each filter datum in buf
}
|
||||
|
||||
func (w *filterWriter) add(key []byte) {
|
||||
if w.generator == nil {
|
||||
return
|
||||
}
|
||||
w.generator.Add(key)
|
||||
w.nKeys++
|
||||
}
|
||||
|
||||
// flush generates filter data for every filterBase boundary crossed by
// the given file offset, keeping the offsets table aligned with data
// block offsets (empty filters may be emitted for skipped boundaries).
func (w *filterWriter) flush(offset uint64) {
	if w.generator == nil {
		return
	}
	// generate appends to w.offsets, so len(w.offsets) grows each
	// iteration and the loop terminates once enough filters cover offset.
	for x := int(offset / filterBase); x > len(w.offsets); {
		w.generate()
	}
}
|
||||
|
||||
// finish emits filter data for any remaining keys and appends the
// filter block trailer: every filter-datum offset, the offset of the
// offsets table itself, and the 1-byte base Lg.
func (w *filterWriter) finish() {
	if w.generator == nil {
		return
	}
	// Generate last keys.

	if w.nKeys > 0 {
		w.generate()
	}
	// Record where the offsets table begins; readers use this to find it.
	w.offsets = append(w.offsets, uint32(w.buf.Len()))
	for _, x := range w.offsets {
		buf4 := w.buf.Alloc(4)
		binary.LittleEndian.PutUint32(buf4, x)
	}
	w.buf.WriteByte(filterBaseLg)
}
|
||||
|
||||
// generate records the current buffer offset and asks the generator to
// emit filter data for the keys added since the previous call.
func (w *filterWriter) generate() {
	// Record offset.
	w.offsets = append(w.offsets, uint32(w.buf.Len()))
	// Generate filters.
	if w.nKeys > 0 {
		w.generator.Generate(&w.buf)
		w.nKeys = 0
	}
}
|
||||
|
||||
// Writer is a table writer.
type Writer struct {
	writer io.Writer // destination file
	err    error     // sticky error; once set, all calls fail with it
	// Options
	cmp         comparer.Comparer
	filter      filter.Filter
	compression opt.Compression
	blockSize   int // target uncompressed size of a data block

	dataBlock   blockWriter  // data block currently being built (reused for the metaindex block on Close)
	indexBlock  blockWriter  // index block, one entry per finished data block
	filterBlock filterWriter // optional filter block
	pendingBH   blockHandle  // handle of the last finished data block, not yet added to the index
	offset      uint64       // bytes written to the file so far
	nEntries    int          // total key/value entries appended
	// Scratch allocated enough for 5 uvarint. Block writer should not use
	// first 20-bytes since it will be used to encode block handle, which
	// then passed to the block writer itself.
	scratch            [50]byte
	comparerScratch    []byte // reusable buffer for Separator/Successor results
	compressionScratch []byte // reusable buffer for snappy output
}
|
||||
|
||||
// writeBlock writes the contents of buf to the file as one block:
// the (possibly snappy-compressed) data followed by the 5-byte trailer
// (1-byte compression type + 4-byte CRC). It returns a handle locating
// the block (length excludes the trailer).
func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
	// Compress the buffer if necessary.
	var b []byte
	if compression == opt.SnappyCompression {
		// Allocate scratch enough for compression and block trailer.
		if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
			w.compressionScratch = make([]byte, n)
		}
		compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
		n := len(compressed)
		// The scratch has room for the trailer; extend the slice over it
		// and set the compression-type byte.
		b = compressed[:n+blockTrailerLen]
		b[n] = blockTypeSnappyCompression
	} else {
		// Uncompressed: append the trailer directly to the block buffer.
		tmp := buf.Alloc(blockTrailerLen)
		tmp[0] = blockTypeNoCompression
		b = buf.Bytes()
	}

	// Calculate the checksum. It covers the data and the compression-type
	// byte; the final 4 bytes hold the checksum itself.
	n := len(b) - 4
	checksum := util.NewCRC(b[:n]).Value()
	binary.LittleEndian.PutUint32(b[n:], checksum)

	// Write the buffer to the file.
	_, err = w.writer.Write(b)
	if err != nil {
		return
	}
	bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
	w.offset += uint64(len(b))
	return
}
|
||||
|
||||
// flushPendingBH adds an index entry for the last finished data block.
// The index key is a short separator that sorts >= every key in that
// block and < key; when key is empty (table close) a successor of the
// block's last key is used instead.
func (w *Writer) flushPendingBH(key []byte) {
	if w.pendingBH.length == 0 {
		// No finished block is awaiting an index entry.
		return
	}
	var separator []byte
	if len(key) == 0 {
		separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey)
	} else {
		separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key)
	}
	if separator == nil {
		// Comparer declined to shorten; fall back to the last key itself.
		separator = w.dataBlock.prevKey
	} else {
		// Keep the (possibly grown) scratch buffer for reuse.
		w.comparerScratch = separator
	}
	// First 20 bytes of scratch are reserved for block-handle encoding.
	n := encodeBlockHandle(w.scratch[:20], w.pendingBH)
	// Append the block handle to the index block.
	w.indexBlock.append(separator, w.scratch[:n])
	// Reset prev key of the data block.
	w.dataBlock.prevKey = w.dataBlock.prevKey[:0]
	// Clear pending block handle.
	w.pendingBH = blockHandle{}
}
|
||||
|
||||
// finishBlock completes and writes the current data block, remembers
// its handle so the next key can produce a short index separator, and
// flushes the filter block up to the new file offset.
func (w *Writer) finishBlock() error {
	w.dataBlock.finish()
	bh, err := w.writeBlock(&w.dataBlock.buf, w.compression)
	if err != nil {
		return err
	}
	// Defer indexing until the next key (or Close) is known.
	w.pendingBH = bh
	// Reset the data block.
	w.dataBlock.reset()
	// Flush the filter block.
	w.filterBlock.flush(w.offset)
	return nil
}
|
||||
|
||||
// Append appends key/value pair to the table. The keys passed must
// be in increasing order.
//
// It is safe to modify the contents of the arguments after Append returns.
func (w *Writer) Append(key, value []byte) error {
	if w.err != nil {
		return w.err
	}
	// Enforce strictly increasing key order.
	if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 {
		w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key)
		return w.err
	}

	// Index the previously finished block now that the next key is
	// known, allowing a shortened separator.
	w.flushPendingBH(key)
	// Append key/value pair to the data block.
	w.dataBlock.append(key, value)
	// Add key to the filter block.
	w.filterBlock.add(key)

	// Finish the data block if block size target reached.
	if w.dataBlock.bytesLen() >= w.blockSize {
		if err := w.finishBlock(); err != nil {
			w.err = err
			return w.err
		}
	}
	w.nEntries++
	return nil
}
|
||||
|
||||
// BlocksLen returns number of blocks written so far.
|
||||
func (w *Writer) BlocksLen() int {
|
||||
n := w.indexBlock.nEntries
|
||||
if w.pendingBH.length > 0 {
|
||||
// Includes the pending block.
|
||||
n++
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// EntriesLen returns the number of key/value entries added so far.
func (w *Writer) EntriesLen() int {
	return w.nEntries
}
|
||||
|
||||
// BytesLen returns the number of bytes written to the file so far.
func (w *Writer) BytesLen() int {
	return int(w.offset)
}
|
||||
|
||||
// Close will finalize the table. Calling Append is not possible
// after Close, but calling BlocksLen, EntriesLen and BytesLen
// is still possible.
//
// Finalization writes, in order: the last data block, the filter
// block, the metaindex block, the index block, and the footer.
func (w *Writer) Close() error {
	if w.err != nil {
		return w.err
	}

	// Write the last data block. Or empty data block if there
	// aren't any data blocks at all.
	if w.dataBlock.nEntries > 0 || w.nEntries == 0 {
		if err := w.finishBlock(); err != nil {
			w.err = err
			return w.err
		}
	}
	// nil key: index the final block using a successor key.
	w.flushPendingBH(nil)

	// Write the filter block.
	var filterBH blockHandle
	w.filterBlock.finish()
	if buf := &w.filterBlock.buf; buf.Len() > 0 {
		filterBH, w.err = w.writeBlock(buf, opt.NoCompression)
		if w.err != nil {
			return w.err
		}
	}

	// Write the metaindex block. The (now empty) data block writer is
	// reused to build it.
	if filterBH.length > 0 {
		key := []byte("filter." + w.filter.Name())
		n := encodeBlockHandle(w.scratch[:20], filterBH)
		w.dataBlock.append(key, w.scratch[:n])
	}
	w.dataBlock.finish()
	metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression)
	if err != nil {
		w.err = err
		return w.err
	}

	// Write the index block.
	w.indexBlock.finish()
	indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression)
	if err != nil {
		w.err = err
		return w.err
	}

	// Write the table footer: zero-padded handles of the metaindex and
	// index blocks, with the magic number in the last 8 bytes.
	footer := w.scratch[:footerLen]
	for i := range footer {
		footer[i] = 0
	}
	n := encodeBlockHandle(footer, metaindexBH)
	encodeBlockHandle(footer[n:], indexBH)
	copy(footer[footerLen-len(magic):], magic)
	if _, err := w.writer.Write(footer); err != nil {
		w.err = err
		return w.err
	}
	w.offset += footerLen

	// Mark the writer closed; subsequent Append calls fail with this error.
	w.err = errors.New("leveldb/table: writer is closed")
	return nil
}
|
||||
|
||||
// NewWriter creates a new initialized table writer for the file.
//
// Table writer is not safe for concurrent use.
func NewWriter(f io.Writer, o *opt.Options) *Writer {
	w := &Writer{
		writer:          f,
		cmp:             o.GetComparer(),
		filter:          o.GetFilter(),
		compression:     o.GetCompression(),
		blockSize:       o.GetBlockSize(),
		comparerScratch: make([]byte, 0),
	}
	// data block
	w.dataBlock.restartInterval = o.GetBlockRestartInterval()
	// The first 20-bytes are used for encoding block handle.
	w.dataBlock.scratch = w.scratch[20:]
	// index block: restart interval 1 so every index key is stored whole.
	w.indexBlock.restartInterval = 1
	w.indexBlock.scratch = w.scratch[20:]
	// filter block
	if w.filter != nil {
		w.filterBlock.generator = w.filter.NewGenerator()
		// Align the filter offsets table with file offset 0.
		w.filterBlock.flush(0)
	}
	return w
}
|
Reference in New Issue
Block a user