Mirror of https://github.com/superseriousbusiness/gotosocial.git
Synced 2024-12-28 01:56:30 +00:00

bumps uptrace/bun dependencies to v1.2.6 (#3569)

This commit is contained in:
parent a444adee97
commit 3fceb5fc1a
go.mod (11 changed lines)

@@ -76,10 +76,10 @@ require (
 	github.com/tetratelabs/wazero v1.8.1
 	github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
 	github.com/ulule/limiter/v3 v3.11.2
-	github.com/uptrace/bun v1.2.5
-	github.com/uptrace/bun/dialect/pgdialect v1.2.5
-	github.com/uptrace/bun/dialect/sqlitedialect v1.2.5
-	github.com/uptrace/bun/extra/bunotel v1.2.5
+	github.com/uptrace/bun v1.2.6
+	github.com/uptrace/bun/dialect/pgdialect v1.2.6
+	github.com/uptrace/bun/dialect/sqlitedialect v1.2.6
+	github.com/uptrace/bun/extra/bunotel v1.2.6
 	github.com/wagslane/go-password-validator v0.3.0
 	github.com/yuin/goldmark v1.7.8
 	go.opentelemetry.io/otel v1.32.0
@@ -111,7 +111,9 @@ require (
 	github.com/Masterminds/sprig/v3 v3.2.3 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
+	github.com/bahlo/generic-list-go v0.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/buger/jsonparser v1.1.1 // indirect
 	github.com/bytedance/sonic v1.11.6 // indirect
 	github.com/bytedance/sonic/loader v0.1.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
@@ -224,6 +226,7 @@ require (
 	github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 // indirect
 	github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
 	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
+	github.com/wk8/go-ordered-map/v2 v2.1.9-0.20240816141633-0a40785b4f41 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.sum (generated, 22 changed lines)

@@ -97,10 +97,14 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
 github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
+github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
+github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=
 github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3bQfXjXJUxNb8=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
 github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
 github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
@@ -581,14 +585,14 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E
 github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
 github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA=
 github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI=
-github.com/uptrace/bun v1.2.5 h1:gSprL5xiBCp+tzcZHgENzJpXnmQwRM/A6s4HnBF85mc=
-github.com/uptrace/bun v1.2.5/go.mod h1:vkQMS4NNs4VNZv92y53uBSHXRqYyJp4bGhMHgaNCQpY=
-github.com/uptrace/bun/dialect/pgdialect v1.2.5 h1:dWLUxpjTdglzfBks2x+U2WIi+nRVjuh7Z3DLYVFswJk=
-github.com/uptrace/bun/dialect/pgdialect v1.2.5/go.mod h1:stwnlE8/6x8cuQ2aXcZqwDK/d+6jxgO3iQewflJT6C4=
-github.com/uptrace/bun/dialect/sqlitedialect v1.2.5 h1:liDvMaIWrN8DrHcxVbviOde/VDss9uhcqpcTSL3eJjc=
-github.com/uptrace/bun/dialect/sqlitedialect v1.2.5/go.mod h1:Mw6IDL/jNUL5ozcREAezOJSZ9Jm4LJlfoaXxBEfNBlM=
-github.com/uptrace/bun/extra/bunotel v1.2.5 h1:kkuuTbrG9d5leYZuSBKhq2gtq346lIrxf98Mig2y128=
-github.com/uptrace/bun/extra/bunotel v1.2.5/go.mod h1:rCHLszRZwppWE9cGDodO2FCI1qCrLwDjONp38KD3bA8=
+github.com/uptrace/bun v1.2.6 h1:lyGBQAhNiClchb97HA2cBnDeRxwTRLhSIgiFPXVisV8=
+github.com/uptrace/bun v1.2.6/go.mod h1:xMgnVFf+/5xsrFBU34HjDJmzZnXbVuNEt/Ih56I8qBU=
+github.com/uptrace/bun/dialect/pgdialect v1.2.6 h1:iNd1YLx619K+sZK+dRcWPzluurXYK1QwIkp9FEfNB/8=
+github.com/uptrace/bun/dialect/pgdialect v1.2.6/go.mod h1:OL7d3qZLxKYP8kxNhMg3IheN1pDR3UScGjoUP+ivxJQ=
+github.com/uptrace/bun/dialect/sqlitedialect v1.2.6 h1:p8vA39kR9Ypw0so+gUhFhd8NOufx3MzvoxJeUpwieQU=
+github.com/uptrace/bun/dialect/sqlitedialect v1.2.6/go.mod h1:sdGy8eCv9WVGDrPhagE9i7FASeyj3BFkHzkRMF/qK3w=
+github.com/uptrace/bun/extra/bunotel v1.2.6 h1:6m90acv9hsDuTYRo3oiKCWMatGPmi+feKAx8Y/GPj9A=
+github.com/uptrace/bun/extra/bunotel v1.2.6/go.mod h1:QGqnFNJ2H88juh7DmgdPJZVN9bSTpj7UaGllSO9JDKk=
 github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c=
 github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
@@ -603,6 +607,8 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh
 github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
 github.com/wagslane/go-password-validator v0.3.0 h1:vfxOPzGHkz5S146HDpavl0cw1DSVP061Ry2PX0/ON6I=
 github.com/wagslane/go-password-validator v0.3.0/go.mod h1:TI1XJ6T5fRdRnHqHt14pvy1tNVnrwe7m3/f1f2fDphQ=
+github.com/wk8/go-ordered-map/v2 v2.1.9-0.20240816141633-0a40785b4f41 h1:rnB8ZLMeAr3VcqjfRkAm27qb8y6zFKNfuHvy1Gfe7KI=
+github.com/wk8/go-ordered-map/v2 v2.1.9-0.20240816141633-0a40785b4f41/go.mod h1:DbzwytT4g/odXquuOCqroKvtxxldI4nb3nuesHF/Exo=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
vendor/github.com/bahlo/generic-list-go/LICENSE (generated, vendored, new file, 27 lines)

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/bahlo/generic-list-go/README.md (generated, vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
# generic-list-go [![CI](https://github.com/bahlo/generic-list-go/actions/workflows/ci.yml/badge.svg)](https://github.com/bahlo/generic-list-go/actions/workflows/ci.yml)

Go [container/list](https://pkg.go.dev/container/list) but with generics.

The code is based on `container/list` in `go1.18beta2`.
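For orientation, the vendored package is used like the standard `container/list`, just with a type parameter instead of `interface{}` values. A minimal hedged sketch against the API in `list.go` below (the values are illustrative):

```go
package main

import (
	"fmt"

	list "github.com/bahlo/generic-list-go"
)

func main() {
	l := list.New[int]()
	l.PushBack(1)
	l.PushBack(2)
	l.PushFront(0)

	// e.Value is an int here; no type assertion needed.
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value)
	}
}
```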
vendor/github.com/bahlo/generic-list-go/list.go (generated, vendored, new file, 235 lines)

@@ -0,0 +1,235 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package list implements a doubly linked list.
//
// To iterate over a list (where l is a *List):
//	for e := l.Front(); e != nil; e = e.Next() {
//		// do something with e.Value
//	}
//
package list

// Element is an element of a linked list.
type Element[T any] struct {
	// Next and previous pointers in the doubly-linked list of elements.
	// To simplify the implementation, internally a list l is implemented
	// as a ring, such that &l.root is both the next element of the last
	// list element (l.Back()) and the previous element of the first list
	// element (l.Front()).
	next, prev *Element[T]

	// The list to which this element belongs.
	list *List[T]

	// The value stored with this element.
	Value T
}

// Next returns the next list element or nil.
func (e *Element[T]) Next() *Element[T] {
	if p := e.next; e.list != nil && p != &e.list.root {
		return p
	}
	return nil
}

// Prev returns the previous list element or nil.
func (e *Element[T]) Prev() *Element[T] {
	if p := e.prev; e.list != nil && p != &e.list.root {
		return p
	}
	return nil
}

// List represents a doubly linked list.
// The zero value for List is an empty list ready to use.
type List[T any] struct {
	root Element[T] // sentinel list element, only &root, root.prev, and root.next are used
	len  int        // current list length excluding (this) sentinel element
}

// Init initializes or clears list l.
func (l *List[T]) Init() *List[T] {
	l.root.next = &l.root
	l.root.prev = &l.root
	l.len = 0
	return l
}

// New returns an initialized list.
func New[T any]() *List[T] { return new(List[T]).Init() }

// Len returns the number of elements of list l.
// The complexity is O(1).
func (l *List[T]) Len() int { return l.len }

// Front returns the first element of list l or nil if the list is empty.
func (l *List[T]) Front() *Element[T] {
	if l.len == 0 {
		return nil
	}
	return l.root.next
}

// Back returns the last element of list l or nil if the list is empty.
func (l *List[T]) Back() *Element[T] {
	if l.len == 0 {
		return nil
	}
	return l.root.prev
}

// lazyInit lazily initializes a zero List value.
func (l *List[T]) lazyInit() {
	if l.root.next == nil {
		l.Init()
	}
}

// insert inserts e after at, increments l.len, and returns e.
func (l *List[T]) insert(e, at *Element[T]) *Element[T] {
	e.prev = at
	e.next = at.next
	e.prev.next = e
	e.next.prev = e
	e.list = l
	l.len++
	return e
}

// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] {
	return l.insert(&Element[T]{Value: v}, at)
}

// remove removes e from its list, decrements l.len
func (l *List[T]) remove(e *Element[T]) {
	e.prev.next = e.next
	e.next.prev = e.prev
	e.next = nil // avoid memory leaks
	e.prev = nil // avoid memory leaks
	e.list = nil
	l.len--
}

// move moves e to next to at.
func (l *List[T]) move(e, at *Element[T]) {
	if e == at {
		return
	}
	e.prev.next = e.next
	e.next.prev = e.prev

	e.prev = at
	e.next = at.next
	e.prev.next = e
	e.next.prev = e
}

// Remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
func (l *List[T]) Remove(e *Element[T]) T {
	if e.list == l {
		// if e.list == l, l must have been initialized when e was inserted
		// in l or l == nil (e is a zero Element) and l.remove will crash
		l.remove(e)
	}
	return e.Value
}

// PushFront inserts a new element e with value v at the front of list l and returns e.
func (l *List[T]) PushFront(v T) *Element[T] {
	l.lazyInit()
	return l.insertValue(v, &l.root)
}

// PushBack inserts a new element e with value v at the back of list l and returns e.
func (l *List[T]) PushBack(v T) *Element[T] {
	l.lazyInit()
	return l.insertValue(v, l.root.prev)
}

// InsertBefore inserts a new element e with value v immediately before mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] {
	if mark.list != l {
		return nil
	}
	// see comment in List.Remove about initialization of l
	return l.insertValue(v, mark.prev)
}

// InsertAfter inserts a new element e with value v immediately after mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] {
	if mark.list != l {
		return nil
	}
	// see comment in List.Remove about initialization of l
	return l.insertValue(v, mark)
}

// MoveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *List[T]) MoveToFront(e *Element[T]) {
	if e.list != l || l.root.next == e {
		return
	}
	// see comment in List.Remove about initialization of l
	l.move(e, &l.root)
}

// MoveToBack moves element e to the back of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *List[T]) MoveToBack(e *Element[T]) {
	if e.list != l || l.root.prev == e {
		return
	}
	// see comment in List.Remove about initialization of l
	l.move(e, l.root.prev)
}

// MoveBefore moves element e to its new position before mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *List[T]) MoveBefore(e, mark *Element[T]) {
	if e.list != l || e == mark || mark.list != l {
		return
	}
	l.move(e, mark.prev)
}

// MoveAfter moves element e to its new position after mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *List[T]) MoveAfter(e, mark *Element[T]) {
	if e.list != l || e == mark || mark.list != l {
		return
	}
	l.move(e, mark)
}

// PushBackList inserts a copy of another list at the back of list l.
// The lists l and other may be the same. They must not be nil.
func (l *List[T]) PushBackList(other *List[T]) {
	l.lazyInit()
	for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
		l.insertValue(e.Value, l.root.prev)
	}
}

// PushFrontList inserts a copy of another list at the front of list l.
// The lists l and other may be the same. They must not be nil.
func (l *List[T]) PushFrontList(other *List[T]) {
	l.lazyInit()
	for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
		l.insertValue(e.Value, &l.root)
	}
}
vendor/github.com/buger/jsonparser/.gitignore (generated, vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
*.test

*.out

*.mprof

.idea

vendor/github.com/buger/goterm/

prof.cpu
prof.mem
vendor/github.com/buger/jsonparser/.travis.yml (generated, vendored, new file, 11 lines)

@@ -0,0 +1,11 @@
language: go
arch:
  - amd64
  - ppc64le
go:
  - 1.7.x
  - 1.8.x
  - 1.9.x
  - 1.10.x
  - 1.11.x
script: go test -v ./.
vendor/github.com/buger/jsonparser/Dockerfile (generated, vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
FROM golang:1.6

RUN go get github.com/Jeffail/gabs
RUN go get github.com/bitly/go-simplejson
RUN go get github.com/pquerna/ffjson
RUN go get github.com/antonholmquist/jason
RUN go get github.com/mreiferson/go-ujson
RUN go get -tags=unsafe -u github.com/ugorji/go/codec
RUN go get github.com/mailru/easyjson

WORKDIR /go/src/github.com/buger/jsonparser
ADD . /go/src/github.com/buger/jsonparser
vendor/github.com/buger/jsonparser/LICENSE (generated, vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2016 Leonid Bugaev

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/buger/jsonparser/Makefile (generated, vendored, new file, 36 lines)

@@ -0,0 +1,36 @@
SOURCE = parser.go
CONTAINER = jsonparser
SOURCE_PATH = /go/src/github.com/buger/jsonparser
BENCHMARK = JsonParser
BENCHTIME = 5s
TEST = .
DRUN = docker run -v `pwd`:$(SOURCE_PATH) -i -t $(CONTAINER)

build:
	docker build -t $(CONTAINER) .

race:
	$(DRUN) --env GORACE="halt_on_error=1" go test ./. $(ARGS) -v -race -timeout 15s

bench:
	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -benchtime $(BENCHTIME) -v

bench_local:
	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench . $(ARGS) -benchtime $(BENCHTIME) -v

profile:
	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -memprofile mem.mprof -v
	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -cpuprofile cpu.out -v
	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -c

test:
	$(DRUN) go test $(LDFLAGS) ./ -run $(TEST) -timeout 10s $(ARGS) -v

fmt:
	$(DRUN) go fmt ./...

vet:
	$(DRUN) go vet ./.

bash:
	$(DRUN) /bin/bash
vendor/github.com/buger/jsonparser/README.md (generated, vendored, new file, 365 lines)

@@ -0,0 +1,365 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/buger/jsonparser)](https://goreportcard.com/report/github.com/buger/jsonparser) ![License](https://img.shields.io/dub/l/vibe-d.svg)
# Alternative JSON parser for Go (up to 10x faster than the standard library)

It does not require you to know the structure of the payload (e.g. create structs), and allows accessing fields by providing the path to them. It is up to **10 times faster** than the standard `encoding/json` package (depending on payload size and usage) and **allocates no memory**. See benchmarks below.

## Rationale
Originally I made this for a project that relies on a lot of 3rd party APIs that can be unpredictable and complex.
I love simplicity and prefer to avoid external dependencies. `encoding/json` requires you to know your data structures exactly, and if you prefer to use `map[string]interface{}` instead, it will be very slow and hard to manage.
I investigated what's on the market and found that most libraries are just wrappers around `encoding/json`; there are a few options with their own parsers (`ffjson`, `easyjson`), but they still require you to create data structures.

The goal of this project is to push the JSON parser to its performance limits without sacrificing compliance or developer experience.
## Example
For the given JSON our goal is to extract the user's full name, number of GitHub followers and avatar.

```go
import "github.com/buger/jsonparser"

...

data := []byte(`{
  "person": {
    "name": {
      "first": "Leonid",
      "last": "Bugaev",
      "fullName": "Leonid Bugaev"
    },
    "github": {
      "handle": "buger",
      "followers": 109
    },
    "avatars": [
      { "url": "https://avatars1.githubusercontent.com/u/14009?v=3&s=460", "type": "thumbnail" }
    ]
  },
  "company": {
    "name": "Acme"
  }
}`)

// You can specify a key path by providing arguments to the Get function
jsonparser.Get(data, "person", "name", "fullName")

// There are `GetInt` and `GetBoolean` helpers if you know the key's data type exactly
jsonparser.GetInt(data, "person", "github", "followers")

// When you try to get an object, it will return a []byte slice pointing to the data containing it
// In `company` it will be `{"name": "Acme"}`
jsonparser.Get(data, "company")

// If the key doesn't exist it will throw an error
var size int64
if value, err := jsonparser.GetInt(data, "company", "size"); err == nil {
	size = value
}

// You can use the `ArrayEach` helper to iterate items [item1, item2 .... itemN]
jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
	fmt.Println(jsonparser.Get(value, "url"))
}, "person", "avatars")

// Or you can access fields by index!
jsonparser.GetString(data, "person", "avatars", "[0]", "url")

// You can use the `ObjectEach` helper to iterate objects { "key1":object1, "key2":object2, .... "keyN":objectN }
jsonparser.ObjectEach(data, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
	fmt.Printf("Key: '%s'\n Value: '%s'\n Type: %s\n", string(key), string(value), dataType)
	return nil
}, "person", "name")

// The most efficient way to extract multiple keys is `EachKey`

paths := [][]string{
	[]string{"person", "name", "fullName"},
	[]string{"person", "avatars", "[0]", "url"},
	[]string{"company", "url"},
}
jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, err error) {
	switch idx {
	case 0: // []string{"person", "name", "fullName"}
		...
	case 1: // []string{"person", "avatars", "[0]", "url"}
		...
	case 2: // []string{"company", "url"},
		...
	}
}, paths...)

// For more information see docs below
```
## Need to speed up your app?

I'm available for consulting and can help you push your app's performance to the limits. Ping me at: leonsbox@gmail.com.

## Reference

The library API is really simple. You just need the `Get` method to perform any operation; the rest are just helpers around it.

You can also view the API at [godoc.org](https://godoc.org/github.com/buger/jsonparser)
### **`Get`**
```go
func Get(data []byte, keys ...string) (value []byte, dataType jsonparser.ValueType, offset int, err error)
```
Receives a data structure and the key path to extract the value from.

Returns:
* `value` - Pointer into the original data structure containing the key's value, or an empty slice if nothing is found or an error occurs
* `dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null`
* `offset` - Offset from the provided data structure where the key value ends. Used mostly internally, for example by the `ArrayEach` helper.
* `err` - If the key is not found or there is any other parsing issue, an error is returned. If the key is not found it also sets `dataType` to `NotExist`

Accepts multiple keys to specify the path to a JSON value (in case of querying nested structures).
If no keys are provided it will try to extract the closest JSON value (simple ones or object/array), useful for reading streams or arrays; see the `ArrayEach` implementation.

Note that a key can be an array index: `jsonparser.GetString(data, "person", "avatars", "[0]", "url")`, pretty cool, yeah?
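A short, hedged sketch of consuming all four return values (the key path and `data` are from the example above):

```go
value, dataType, _, err := jsonparser.Get(data, "person", "name", "fullName")
if err != nil {
	// key not found or malformed JSON; on a missing key, dataType is jsonparser.NotExist
}
fmt.Println(string(value), dataType)
```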
### **`GetString`**
```go
func GetString(data []byte, keys ...string) (val string, err error)
```
Returns strings, properly handling escaped and unicode characters. Note that this will cause additional memory allocations.
### **`GetUnsafeString`**
If you need a string in your app, and are ready to sacrifice support for escaped symbols in favor of speed, it returns a string mapped onto the existing byte slice memory, without any allocations:
```go
s, _ := jsonparser.GetUnsafeString(data, "person", "name", "title")
switch s {
case "CEO":
	...
case "Engineer":
	...
}
```
Note that `unsafe` here means that your string will exist only until the GC frees the underlying byte slice: for most cases this means you can use this string only in the current context, and should not pass it anywhere externally: through channels or any other way.
### **`GetBoolean`**, **`GetInt`** and **`GetFloat`**
```go
func GetBoolean(data []byte, keys ...string) (val bool, err error)

func GetFloat(data []byte, keys ...string) (val float64, err error)

func GetInt(data []byte, keys ...string) (val int64, err error)
```
If you know the key type, you can use the helpers above.
If the key's data type does not match, an error is returned.
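A hedged usage sketch of the typed helpers (the payload and field names are illustrative):

```go
data := []byte(`{"active": true, "score": 99.5, "count": 12}`)

active, _ := jsonparser.GetBoolean(data, "active") // true
score, _ := jsonparser.GetFloat(data, "score")     // 99.5
count, err := jsonparser.GetInt(data, "count")     // 12
if err != nil {
	// returned when the key is missing or holds a non-number value
}
```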
### **`ArrayEach`**
```go
func ArrayEach(data []byte, cb func(value []byte, dataType jsonparser.ValueType, offset int, err error), keys ...string)
```
Needed for iterating arrays; accepts a callback function with the same return arguments as `Get`.
### **`ObjectEach`**
```go
func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error)
```
Needed for iterating objects; accepts a callback function. Example:
```go
var handler func([]byte, []byte, jsonparser.ValueType, int) error
handler = func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
	// do stuff here
	return nil
}
jsonparser.ObjectEach(myJson, handler)
```
### **`EachKey`**
```go
func EachKey(data []byte, cb func(idx int, value []byte, dataType jsonparser.ValueType, err error), paths ...[]string)
```
When you need to read multiple keys and are not afraid of a low-level API, `EachKey` is your friend. It reads the payload only a single time and calls the callback function once a path is found. In contrast, when you call `Get` multiple times, it has to process the payload each time you call it. Depending on the payload, `EachKey` can be several times faster than `Get`. Paths can use nested keys as well!

```go
paths := [][]string{
	[]string{"uuid"},
	[]string{"tz"},
	[]string{"ua"},
	[]string{"st"},
}
var data SmallPayload

jsonparser.EachKey(smallFixture, func(idx int, value []byte, vt jsonparser.ValueType, err error) {
	switch idx {
	case 0:
		data.Uuid, _ = jsonparser.ParseString(value)
	case 1:
		v, _ := jsonparser.ParseInt(value)
		data.Tz = int(v)
	case 2:
		data.Ua, _ = jsonparser.ParseString(value)
	case 3:
		v, _ := jsonparser.ParseInt(value)
		data.St = int(v)
	}
}, paths...)
```
### **`Set`**
```go
func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error)
```
Receives an existing data structure, a key path to set, and the value to set at that key. *This functionality is experimental.*

Returns:
* `value` - Pointer to the original data structure with the key value updated or added.
* `err` - If there is any parsing issue, an error is returned.

Accepts multiple keys to specify the path to a JSON value (in case of updating or creating nested structures).

Note that a key can be an array index: `jsonparser.Set(data, []byte("http://github.com"), "person", "avatars", "[0]", "url")`
### **`Delete`**
```go
func Delete(data []byte, keys ...string) (value []byte)
```
Receives an existing data structure and a key path to delete. *This functionality is experimental.*

Returns:
* `value` - Pointer to the original data structure with the key path deleted if it can be found. If there is no key path, the whole data structure is deleted.

Accepts multiple keys to specify the path to a JSON value (in case of updating or creating nested structures).

Note that a key can be an array index: `jsonparser.Delete(data, "person", "avatars", "[0]", "url")`
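Since both helpers return the modified buffer rather than mutating in place, a hedged usage sketch (the payload and key paths are illustrative):

```go
data := []byte(`{"person": {"name": "Leonid"}}`)

// Set returns a buffer with the value written at the key path.
data, err := jsonparser.Set(data, []byte(`"Bugaev"`), "person", "surname")
if err != nil {
	// the input JSON could not be parsed
}

// Delete returns the buffer with the key path removed.
data = jsonparser.Delete(data, "person", "name")
```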
## What makes it so fast?
* It does not rely on `encoding/json`, `reflection` or `interface{}`; the only real package dependency is `bytes`.
* Operates with the JSON payload on the byte level, providing you pointers to the original data structure: no memory allocation.
* No automatic type conversions; by default everything is a []byte, but it provides you the value type, so you can convert it yourself (there are a few helpers included).
* Does not parse the full record, only the keys you specified
## Benchmarks

There are 3 benchmark types, trying to simulate real-life usage for small, medium and large JSON payloads.
For each metric, the lower value is better. Time/op is in nanoseconds. Values better than standard encoding/json are marked as bold text.
Benchmarks run on a standard Linode 1024 box.

Compared libraries:
* https://golang.org/pkg/encoding/json
* https://github.com/Jeffail/gabs
* https://github.com/a8m/djson
* https://github.com/bitly/go-simplejson
* https://github.com/antonholmquist/jason
* https://github.com/mreiferson/go-ujson
* https://github.com/ugorji/go/codec
* https://github.com/pquerna/ffjson
* https://github.com/mailru/easyjson
* https://github.com/buger/jsonparser

#### TLDR
If you want to skip the next sections, we have 2 winners: `jsonparser` and `easyjson`.
`jsonparser` is up to 10 times faster than the standard `encoding/json` package (depending on payload size and usage), and almost infinitely (literally) better in memory consumption because it operates with the data on the byte level and provides direct slice pointers.
`easyjson` wins on CPU in the medium tests, and frankly I'm impressed with this package: these are remarkable results considering that it is almost a drop-in replacement for `encoding/json` (it requires some code generation).

It's hard to fully compare `jsonparser` with `easyjson` (or `ffjson`): they are true parsers and fully process the record, unlike `jsonparser`, which parses only the keys you specified.

If you are searching for a replacement for `encoding/json` while keeping structs, `easyjson` is an amazing choice. If you want to process dynamic JSON, have memory constraints, or want more control over your data, you should try `jsonparser`.

`jsonparser` performance heavily depends on usage, and it works best when you do not need to process the full record, only some keys. The more calls you need to make, the slower it will be; in contrast, `easyjson` (or `ffjson`, `encoding/json`) parses the record only once, and then you can make as many calls as you want.

With great power comes great responsibility! :)
#### Small payload

Each test processes 190 bytes of http log as a JSON record.
It should read multiple fields.
https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_small_payload_test.go

| Library | time/op | bytes/op | allocs/op |
| ------ | ------- | -------- | ------- |
| encoding/json struct | 7879 | 880 | 18 |
| encoding/json interface{} | 8946 | 1521 | 38 |
| Jeffail/gabs | 10053 | 1649 | 46 |
| bitly/go-simplejson | 10128 | 2241 | 36 |
| antonholmquist/jason | 27152 | 7237 | 101 |
| github.com/ugorji/go/codec | 8806 | 2176 | 31 |
| mreiferson/go-ujson | **7008** | **1409** | 37 |
| a8m/djson | 3862 | 1249 | 30 |
| pquerna/ffjson | **3769** | **624** | **15** |
| mailru/easyjson | **2002** | **192** | **9** |
| buger/jsonparser | **1367** | **0** | **0** |
| buger/jsonparser (EachKey API) | **809** | **0** | **0** |

Winners are ffjson, easyjson and jsonparser, where jsonparser is up to 9.8x faster than encoding/json, 4.6x faster than ffjson, and slightly faster than easyjson.
If you look at memory allocation, jsonparser has no rivals, as it makes no data copies and operates with raw []byte structures and pointers into them.
#### Medium payload

Each test processes a 2.4kb JSON record (based on Clearbit API).
It should read multiple nested fields and 1 array.

https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_medium_payload_test.go

| Library | time/op | bytes/op | allocs/op |
| ------- | ------- | -------- | --------- |
| encoding/json struct | 57749 | 1336 | 29 |
| encoding/json interface{} | 79297 | 10627 | 215 |
| Jeffail/gabs | 83807 | 11202 | 235 |
| bitly/go-simplejson | 88187 | 17187 | 220 |
| antonholmquist/jason | 94099 | 19013 | 247 |
| github.com/ugorji/go/codec | 114719 | 6712 | 152 |
| mreiferson/go-ujson | **56972** | 11547 | 270 |
| a8m/djson | 28525 | 10196 | 198 |
| pquerna/ffjson | **20298** | **856** | **20** |
| mailru/easyjson | **10512** | **336** | **12** |
| buger/jsonparser | **15955** | **0** | **0** |
| buger/jsonparser (EachKey API) | **8916** | **0** | **0** |

The difference between ffjson and jsonparser in CPU usage is smaller, while the memory consumption difference is growing. On the other hand, `easyjson` shows remarkable performance for the medium payload.

`gabs`, `go-simplejson` and `jason` are based on encoding/json and map[string]interface{}, and are really only helpers for unstructured JSON; their performance correlates with `encoding/json interface{}`, and they will skip the next round.
`go-ujson`, while it has its own parser, shows the same performance as `encoding/json` and also skips the next round. Same situation with `ugorji/go/codec`, but it showed unexpectedly bad performance for complex payloads.
#### Large payload

Each test processes a 24kb JSON record (based on Discourse API).
It should read 2 arrays, and for each item in the array get a few fields.
Basically it means processing a full JSON file.

https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_large_payload_test.go

| Library | time/op | bytes/op | allocs/op |
| --- | --- | --- | --- |
| encoding/json struct | 748336 | 8272 | 307 |
| encoding/json interface{} | 1224271 | 215425 | 3395 |
| a8m/djson | 510082 | 213682 | 2845 |
| pquerna/ffjson | **312271** | **7792** | **298** |
| mailru/easyjson | **154186** | **6992** | **288** |
| buger/jsonparser | **85308** | **0** | **0** |

`jsonparser` is now the winner, but do not forget that it is a far more lightweight parser than `ffjson` or `easyjson`: they have to parse all the data, while `jsonparser` parses only what you need. All of `ffjson`, `easyjson` and `jsonparser` have their own parsing code and do not depend on `encoding/json` or `interface{}`; that's one of the reasons why they are so fast. `easyjson` also uses a bit of the `unsafe` package to reduce memory consumption (in theory it can lead to some unexpected GC issues, but I have not tested it enough).

Also, the last benchmark did not include the `EachKey` test, because in this particular case we need to read a lot of array values, and using `ArrayEach` is more efficient.
## Questions and support

All bug reports and suggestions should go through GitHub Issues.

## Contributing

1. Fork it
2. Create your feature branch (git checkout -b my-new-feature)
3. Commit your changes (git commit -am 'Added some feature')
4. Push to the branch (git push origin my-new-feature)
5. Create a new Pull Request

## Development

All my development happens using Docker, and the repo includes some Make tasks to simplify development.

* `make build` - builds the docker image, usually only needs to be called once
* `make test` - run tests
* `make fmt` - run go fmt
* `make bench` - run benchmarks (if you need to run only a single benchmark, modify the `BENCHMARK` variable in the make file)
* `make profile` - runs benchmarks and generates 3 files: `cpu.out`, `mem.mprof` and the `benchmark.test` binary, which can be used with `go tool pprof`
* `make bash` - enter the container (I use it for running `go tool pprof` above)
vendor/github.com/buger/jsonparser/bytes.go (generated, vendored, new file, 47 lines)

@@ -0,0 +1,47 @@
package jsonparser

import (
	bio "bytes"
)

// minInt64 '-9223372036854775808' is the smallest representable number in int64
const minInt64 = `9223372036854775808`

// About 2x faster than strconv.ParseInt because it only supports base 10, which is enough for JSON
func parseInt(bytes []byte) (v int64, ok bool, overflow bool) {
	if len(bytes) == 0 {
		return 0, false, false
	}

	var neg bool = false
	if bytes[0] == '-' {
		neg = true
		bytes = bytes[1:]
	}

	var b int64 = 0
	for _, c := range bytes {
		if c >= '0' && c <= '9' {
			b = (10 * v) + int64(c-'0')
		} else {
			return 0, false, false
		}
		if overflow = (b < v); overflow {
			break
		}
		v = b
	}

	if overflow {
		if neg && bio.Equal(bytes, []byte(minInt64)) {
			return b, true, false
		}
		return 0, false, true
	}

	if neg {
		return -v, true, false
	} else {
		return v, true, false
	}
}
vendor/github.com/buger/jsonparser/bytes_safe.go (generated, vendored, new file, 25 lines)

@@ -0,0 +1,25 @@
// +build appengine appenginevm

package jsonparser

import (
	"strconv"
)

// See fastbytes_unsafe.go for explanation on why *[]byte is used (signatures must be consistent with those in that file)

func equalStr(b *[]byte, s string) bool {
	return string(*b) == s
}

func parseFloat(b *[]byte) (float64, error) {
	return strconv.ParseFloat(string(*b), 64)
}

func bytesToString(b *[]byte) string {
	return string(*b)
}

func StringToBytes(s string) []byte {
	return []byte(s)
}
vendor/github.com/buger/jsonparser/bytes_unsafe.go (generated, vendored, new file, 44 lines)

@@ -0,0 +1,44 @@
// +build !appengine,!appenginevm

package jsonparser

import (
	"reflect"
	"runtime"
	"strconv"
	"unsafe"
)

//
// The reason for using *[]byte rather than []byte in parameters is an optimization. As of Go 1.6,
// the compiler cannot perfectly inline the function when using a non-pointer slice. That is,
// the non-pointer []byte parameter version is slower than if its function body is manually
// inlined, whereas the pointer []byte version is equally fast to the manually inlined
// version. Instruction count in assembly taken from "go tool compile" confirms this difference.
//
// TODO: Remove hack after Go 1.7 release
//
func equalStr(b *[]byte, s string) bool {
	return *(*string)(unsafe.Pointer(b)) == s
}

func parseFloat(b *[]byte) (float64, error) {
	return strconv.ParseFloat(*(*string)(unsafe.Pointer(b)), 64)
}

// A hack until issue golang/go#2632 is fixed.
// See: https://github.com/golang/go/issues/2632
func bytesToString(b *[]byte) string {
	return *(*string)(unsafe.Pointer(b))
}

func StringToBytes(s string) []byte {
	b := make([]byte, 0, 0)
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	bh.Data = sh.Data
	bh.Cap = sh.Len
	bh.Len = sh.Len
	runtime.KeepAlive(s)
	return b
}
173
vendor/github.com/buger/jsonparser/escape.go
generated
vendored
Normal file
173
vendor/github.com/buger/jsonparser/escape.go
generated
vendored
Normal file
|
@ -0,0 +1,173 @@
|
||||||
|
package jsonparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JSON Unicode stuff: see https://tools.ietf.org/html/rfc7159#section-7
|
||||||
|
|
||||||
|
const supplementalPlanesOffset = 0x10000
|
||||||
|
const highSurrogateOffset = 0xD800
|
||||||
|
const lowSurrogateOffset = 0xDC00
|
||||||
|
|
||||||
|
const basicMultilingualPlaneReservedOffset = 0xDFFF
|
||||||
|
const basicMultilingualPlaneOffset = 0xFFFF
|
||||||
|
|
||||||
|
func combineUTF16Surrogates(high, low rune) rune {
|
||||||
|
return supplementalPlanesOffset + (high-highSurrogateOffset)<<10 + (low - lowSurrogateOffset)
|
||||||
|
}
|
||||||
|
|
||||||
|
const badHex = -1
|
||||||
|
|
||||||
|
func h2I(c byte) int {
|
||||||
|
switch {
|
||||||
|
case c >= '0' && c <= '9':
|
||||||
|
return int(c - '0')
|
||||||
|
case c >= 'A' && c <= 'F':
|
||||||
|
return int(c - 'A' + 10)
|
||||||
|
case c >= 'a' && c <= 'f':
|
||||||
|
return int(c - 'a' + 10)
|
||||||
|
}
|
||||||
|
return badHex
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeSingleUnicodeEscape decodes a single \uXXXX escape sequence. The prefix \u is assumed to be present and
|
||||||
|
// is not checked.
|
||||||
|
// In JSON, these escapes can either come alone or as part of "UTF16 surrogate pairs" that must be handled together.
|
||||||
|
// This function only handles one; decodeUnicodeEscape handles this more complex case.
|
||||||
|
func decodeSingleUnicodeEscape(in []byte) (rune, bool) {
|
||||||
|
// We need at least 6 characters total
|
||||||
|
if len(in) < 6 {
|
||||||
|
return utf8.RuneError, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert hex to decimal
|
||||||
|
h1, h2, h3, h4 := h2I(in[2]), h2I(in[3]), h2I(in[4]), h2I(in[5])
|
||||||
|
if h1 == badHex || h2 == badHex || h3 == badHex || h4 == badHex {
|
||||||
|
return utf8.RuneError, false
|
||||||
|
}
|
	// Compose the hex digits
	return rune(h1<<12 + h2<<8 + h3<<4 + h4), true
}

// isUTF16EncodedRune checks if a rune is in the range for non-BMP characters,
// which is used to describe UTF16 chars.
// Source: https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
func isUTF16EncodedRune(r rune) bool {
	return highSurrogateOffset <= r && r <= basicMultilingualPlaneReservedOffset
}

func decodeUnicodeEscape(in []byte) (rune, int) {
	if r, ok := decodeSingleUnicodeEscape(in); !ok {
		// Invalid Unicode escape
		return utf8.RuneError, -1
	} else if r <= basicMultilingualPlaneOffset && !isUTF16EncodedRune(r) {
		// Valid Unicode escape in Basic Multilingual Plane
		return r, 6
	} else if r2, ok := decodeSingleUnicodeEscape(in[6:]); !ok { // Note: previous decodeSingleUnicodeEscape success guarantees at least 6 bytes remain
		// UTF16 "high surrogate" without mandatory valid following Unicode escape for the "low surrogate"
		return utf8.RuneError, -1
	} else if r2 < lowSurrogateOffset {
		// Invalid UTF16 "low surrogate"
		return utf8.RuneError, -1
	} else {
		// Valid UTF16 surrogate pair
		return combineUTF16Surrogates(r, r2), 12
	}
}

// backslashCharEscapeTable: when '\X' is found for some byte X, it is to be replaced with backslashCharEscapeTable[X]
var backslashCharEscapeTable = [...]byte{
	'"':  '"',
	'\\': '\\',
	'/':  '/',
	'b':  '\b',
	'f':  '\f',
	'n':  '\n',
	'r':  '\r',
	't':  '\t',
}

// unescapeToUTF8 unescapes the single escape sequence starting at 'in' into 'out' and returns
// how many characters were consumed from 'in' and emitted into 'out'.
// If a valid escape sequence does not appear as a prefix of 'in', (-1, -1) is returned to signal the error.
func unescapeToUTF8(in, out []byte) (inLen int, outLen int) {
	if len(in) < 2 || in[0] != '\\' {
		// Invalid escape due to insufficient characters for any escape or no initial backslash
		return -1, -1
	}

	// https://tools.ietf.org/html/rfc7159#section-7
	switch e := in[1]; e {
	case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
		// Valid basic 2-character escapes (use lookup table)
		out[0] = backslashCharEscapeTable[e]
		return 2, 1
	case 'u':
		// Unicode escape
		if r, inLen := decodeUnicodeEscape(in); inLen == -1 {
			// Invalid Unicode escape
			return -1, -1
		} else {
			// Valid Unicode escape; re-encode as UTF8
			outLen := utf8.EncodeRune(out, r)
			return inLen, outLen
		}
	}

	return -1, -1
}

// Unescape unescapes the string contained in 'in' and returns it as a slice.
// If 'in' contains no escaped characters:
//	Returns 'in'.
// Else, if 'out' is of sufficient capacity (guaranteed if cap(out) >= len(in)):
//	'out' is used to build the unescaped string and is returned with no extra allocation.
// Else:
//	A new slice is allocated and returned.
func Unescape(in, out []byte) ([]byte, error) {
	firstBackslash := bytes.IndexByte(in, '\\')
	if firstBackslash == -1 {
		return in, nil
	}

	// Get a buffer of sufficient size (allocate if needed)
	if cap(out) < len(in) {
		out = make([]byte, len(in))
	} else {
		out = out[0:len(in)]
	}

	// Copy the first sequence of unescaped bytes to the output and obtain a buffer pointer (subslice)
	copy(out, in[:firstBackslash])
	in = in[firstBackslash:]
	buf := out[firstBackslash:]

	for len(in) > 0 {
		// Unescape the next escaped character
		inLen, bufLen := unescapeToUTF8(in, buf)
		if inLen == -1 {
			return nil, MalformedStringEscapeError
		}

		in = in[inLen:]
		buf = buf[bufLen:]

		// Copy everything up until the next backslash
		nextBackslash := bytes.IndexByte(in, '\\')
		if nextBackslash == -1 {
			copy(buf, in)
			buf = buf[len(in):]
			break
		} else {
			copy(buf, in[:nextBackslash])
			buf = buf[nextBackslash:]
			in = in[nextBackslash:]
		}
	}

	// Trim the out buffer to the amount that was actually emitted
	return out[:len(out)-len(buf)], nil
}
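
Editor's aside: a minimal usage sketch for the Unescape helper above (hypothetical caller code, not part of this diff; the input bytes are the contents of a JSON string, without the surrounding quotes):

package main

import (
	"fmt"
	"log"

	"github.com/buger/jsonparser"
)

func main() {
	// String contents with BMP escapes and a UTF-16 surrogate pair.
	in := []byte(`Caf\u00e9 \ud83d\ude00`)
	// A scratch buffer with cap(buf) >= len(in) lets Unescape avoid allocating.
	buf := make([]byte, len(in))
	out, err := jsonparser.Unescape(in, buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // prints: Café 😀
}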
117 vendor/github.com/buger/jsonparser/fuzz.go generated vendored Normal file
@@ -0,0 +1,117 @@
package jsonparser

func FuzzParseString(data []byte) int {
	r, err := ParseString(data)
	if err != nil || r == "" {
		return 0
	}
	return 1
}

func FuzzEachKey(data []byte) int {
	paths := [][]string{
		{"name"},
		{"order"},
		{"nested", "a"},
		{"nested", "b"},
		{"nested2", "a"},
		{"nested", "nested3", "b"},
		{"arr", "[1]", "b"},
		{"arrInt", "[3]"},
		{"arrInt", "[5]"},
		{"nested"},
		{"arr", "["},
		{"a\n", "b\n"},
	}
	EachKey(data, func(idx int, value []byte, vt ValueType, err error) {}, paths...)
	return 1
}

func FuzzDelete(data []byte) int {
	Delete(data, "test")
	return 1
}

func FuzzSet(data []byte) int {
	_, err := Set(data, []byte(`"new value"`), "test")
	if err != nil {
		return 0
	}
	return 1
}

func FuzzObjectEach(data []byte) int {
	_ = ObjectEach(data, func(key, value []byte, valueType ValueType, off int) error {
		return nil
	})
	return 1
}

func FuzzParseFloat(data []byte) int {
	_, err := ParseFloat(data)
	if err != nil {
		return 0
	}
	return 1
}

func FuzzParseInt(data []byte) int {
	_, err := ParseInt(data)
	if err != nil {
		return 0
	}
	return 1
}

func FuzzParseBool(data []byte) int {
	_, err := ParseBoolean(data)
	if err != nil {
		return 0
	}
	return 1
}

func FuzzTokenStart(data []byte) int {
	_ = tokenStart(data)
	return 1
}

func FuzzGetString(data []byte) int {
	_, err := GetString(data, "test")
	if err != nil {
		return 0
	}
	return 1
}

func FuzzGetFloat(data []byte) int {
	_, err := GetFloat(data, "test")
	if err != nil {
		return 0
	}
	return 1
}

func FuzzGetInt(data []byte) int {
	_, err := GetInt(data, "test")
	if err != nil {
		return 0
	}
	return 1
}

func FuzzGetBoolean(data []byte) int {
	_, err := GetBoolean(data, "test")
	if err != nil {
		return 0
	}
	return 1
}

func FuzzGetUnsafeString(data []byte) int {
	_, err := GetUnsafeString(data, "test")
	if err != nil {
		return 0
	}
	return 1
}
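
Editor's aside: the FuzzXxx functions above follow the go-fuzz harness convention, where the return value steers corpus priority (1 = interesting input, 0 = discard). A sketch of driving one of them from Go's built-in fuzzing engine, as a hypothetical test file in the same package (not part of this diff):

package jsonparser

import "testing"

func FuzzParseStringNative(f *testing.F) {
	f.Add([]byte(`"hello"`))
	f.Fuzz(func(t *testing.T, data []byte) {
		_ = FuzzParseString(data) // the int result only guides go-fuzz; ignored here
	})
}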
47 vendor/github.com/buger/jsonparser/oss-fuzz-build.sh generated vendored Normal file
@@ -0,0 +1,47 @@
#!/bin/bash -eu

git clone https://github.com/dvyukov/go-fuzz-corpus
zip corpus.zip go-fuzz-corpus/json/corpus/*

cp corpus.zip $OUT/fuzzparsestring_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseString fuzzparsestring

cp corpus.zip $OUT/fuzzeachkey_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzEachKey fuzzeachkey

cp corpus.zip $OUT/fuzzdelete_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzDelete fuzzdelete

cp corpus.zip $OUT/fuzzset_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzSet fuzzset

cp corpus.zip $OUT/fuzzobjecteach_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzObjectEach fuzzobjecteach

cp corpus.zip $OUT/fuzzparsefloat_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseFloat fuzzparsefloat

cp corpus.zip $OUT/fuzzparseint_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseInt fuzzparseint

cp corpus.zip $OUT/fuzzparsebool_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseBool fuzzparsebool

cp corpus.zip $OUT/fuzztokenstart_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzTokenStart fuzztokenstart

cp corpus.zip $OUT/fuzzgetstring_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetString fuzzgetstring

cp corpus.zip $OUT/fuzzgetfloat_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetFloat fuzzgetfloat

cp corpus.zip $OUT/fuzzgetint_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetInt fuzzgetint

cp corpus.zip $OUT/fuzzgetboolean_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetBoolean fuzzgetboolean

cp corpus.zip $OUT/fuzzgetunsafestring_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetUnsafeString fuzzgetunsafestring
1283 vendor/github.com/buger/jsonparser/parser.go generated vendored Normal file
File diff suppressed because it is too large
49 vendor/github.com/uptrace/bun/CHANGELOG.md generated vendored
@@ -1,3 +1,52 @@
## [1.2.6](https://github.com/uptrace/bun/compare/v1.2.5...v1.2.6) (2024-11-20)

### Bug Fixes

* append IDENTITY to ADD COLUMN statement if needed ([694f873](https://github.com/uptrace/bun/commit/694f873d61ed8d2f09032ae0c0dbec4b71c3719e))
* **ci:** prune stale should be executed at 3 AM every day ([0cedcb0](https://github.com/uptrace/bun/commit/0cedcb068229b63041a4f48de12bb767c8454048))
* cleanup after testUniqueRenamedTable ([b1ae32e](https://github.com/uptrace/bun/commit/b1ae32e9e9f45ff2a66e50bfd13bedcf6653d874))
* fix go.mod of oracledialect ([89e21ea](https://github.com/uptrace/bun/commit/89e21eab362c60511cca00890ae29551a2ba7c46))
* has many relationship with multiple columns ([1664b2c](https://github.com/uptrace/bun/commit/1664b2c07a5f6cfd3b6730e5005373686e9830a6))
* ignore case for type equivalence ([c3253a5](https://github.com/uptrace/bun/commit/c3253a5c59b078607db9e216ddc11afdef546e05))
* implement DefaultSchema for Oracle dialect ([d08fa40](https://github.com/uptrace/bun/commit/d08fa40cc87d67296a83a77448ea511531fc8cdd))
* **oracledialect:** add go.mod file so the dialect is released properly ([#1043](https://github.com/uptrace/bun/issues/1043)) ([1bb5597](https://github.com/uptrace/bun/commit/1bb5597f1a32f5d693101ef4a62e25d99f5b9db5))
* **oracledialect:** update go.mod by go mod tidy to fix tests ([7f90a15](https://github.com/uptrace/bun/commit/7f90a15c51a2482dda94226dd13b913d6b470a29))
* **pgdialect:** array value quoting ([892c416](https://github.com/uptrace/bun/commit/892c416272a8428c592896d65d3ad51a6f2356d8))
* remove schema name from t.Name during bun-schema inspection ([31ed582](https://github.com/uptrace/bun/commit/31ed58254ad08143d88684672acd33ce044ea5a9))
* rename column only if the name does not exist in 'target' ([fed6012](https://github.com/uptrace/bun/commit/fed6012d177e55b8320b31ef37fc02a0cbf0b9f5))
* support embed with tag Unique ([3acd6dd](https://github.com/uptrace/bun/commit/3acd6dd8546118d7b867ca796a5e56311edad070))
* update oracledialect/version.go in release.sh ([bcd070f](https://github.com/uptrace/bun/commit/bcd070f48a75d0092a5620261658c9c5994f0bf6))
* update schema.Field names ([9b810de](https://github.com/uptrace/bun/commit/9b810dee4b1a721efb82c913099f39f52c44eb57))

### Features

* add and drop columns ([3fdd5b8](https://github.com/uptrace/bun/commit/3fdd5b8f635f849a74e78c665274609f75245b19))
* add and drop IDENTITY ([dd83779](https://github.com/uptrace/bun/commit/dd837795c31490fd8816eec0e9833e79fafdda32))
* add support type for net/netip.addr and net/netip.prefix ([#1028](https://github.com/uptrace/bun/issues/1028)) ([95c4a8e](https://github.com/uptrace/bun/commit/95c4a8ebd634e1e99114727a7b157eeeb9297ee9))
* **automigrate:** detect renamed tables ([c03938f](https://github.com/uptrace/bun/commit/c03938ff5e9fa2f653e4c60668b1368357d2de10))
* change column type ([3cfd8c6](https://github.com/uptrace/bun/commit/3cfd8c62125786aaf6f493acc5b39f4d3db3d628))
* **ci:** support release on osx ([435510b](https://github.com/uptrace/bun/commit/435510b0a73b0d9e6d06e3e3c3f0fa4379e9ed8c))
* create sql migrations and apply them ([1bf7cfd](https://github.com/uptrace/bun/commit/1bf7cfd067e0e26ae212b0f7421e5abc6f67fb4f))
* create transactional migration files ([c3320f6](https://github.com/uptrace/bun/commit/c3320f624830dc2fe99af2c7cbe492b2a83f9e4a))
* detect Create/Drop table ([408859f](https://github.com/uptrace/bun/commit/408859f07be38236b39a00909cdce55d49f6f824))
* detect modified relations ([a918dc4](https://github.com/uptrace/bun/commit/a918dc472a33dd24c5fffd4d048bcf49f2e07a42))
* detect renamed columns ([886d0a5](https://github.com/uptrace/bun/commit/886d0a5b18aba272f1c86af2a2cf68ce4c8879f2))
* detect renamed tables ([8857bab](https://github.com/uptrace/bun/commit/8857bab54b94170d218633f3b210d379e4e51a21))
* enhance Apply method to accept multiple functions ([7823f2f](https://github.com/uptrace/bun/commit/7823f2f24c814e104dc59475156255c7b3b26144))
* implement fmt.Stringer queries ([5060e47](https://github.com/uptrace/bun/commit/5060e47db13451a982e48d0f14055a58ba60b472))
* improve FK handling ([a822fc5](https://github.com/uptrace/bun/commit/a822fc5f8ae547b7cd41e1ca35609d519d78598b))
* include target schema name in migration name ([ac8d221](https://github.com/uptrace/bun/commit/ac8d221e6443b469e794314c5fc189250fa542d5))
* **mariadb:** support RETURNING clause in DELETE statement ([b8dec9d](https://github.com/uptrace/bun/commit/b8dec9d9a06124696bd5ee2abbf33f19087174b6))
* migrate FKs ([4c1dfdb](https://github.com/uptrace/bun/commit/4c1dfdbe99c73d0c0f2d7b1f8b11adf30c6a41f7))
* **mysql:** support ORDER BY and LIMIT clauses in UPDATE and DELETE statements ([de71bed](https://github.com/uptrace/bun/commit/de71bed9252980648269af85b7a51cbc464ce710))
* support modifying primary keys ([a734629](https://github.com/uptrace/bun/commit/a734629fa285406038cbe4a50798626b5ac08539))
* support UNIQUE constraints ([3c4d5d2](https://github.com/uptrace/bun/commit/3c4d5d2c47be4652fb9b5cf1c6bd7b6c0a437287))
* use *bun.DB in MigratorDialect ([a8788bf](https://github.com/uptrace/bun/commit/a8788bf62cbcc954a08532c299c774262de7a81d))

## [1.2.5](https://github.com/uptrace/bun/compare/v1.2.3...v1.2.5) (2024-10-26)
2 vendor/github.com/uptrace/bun/Makefile generated vendored
@@ -6,7 +6,7 @@ test:
 	echo "go test in $${dir}"; \
 	(cd "$${dir}" && \
 	go test && \
-	env GOOS=linux GOARCH=386 go test && \
+	env GOOS=linux GOARCH=386 TZ= go test && \
 	go vet); \
 	done

1 vendor/github.com/uptrace/bun/README.md generated vendored
@@ -4,6 +4,7 @@
 [![PkgGoDev](https://pkg.go.dev/badge/github.com/uptrace/bun)](https://pkg.go.dev/github.com/uptrace/bun)
 [![Documentation](https://img.shields.io/badge/bun-documentation-informational)](https://bun.uptrace.dev/)
 [![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
+[![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20Bun%20Guru-006BFF)](https://gurubase.io/g/bun)
 
 > Bun is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). Uptrace
 > is an open-source APM tool that supports distributed tracing, metrics, and logs. You can use it to
3 vendor/github.com/uptrace/bun/db.go generated vendored
@@ -703,6 +703,5 @@ func (tx Tx) NewDropColumn() *DropColumnQuery {
 //------------------------------------------------------------------------------
 
 func (db *DB) makeQueryBytes() []byte {
-	// TODO: make this configurable?
-	return make([]byte, 0, 4096)
+	return internal.MakeQueryBytes()
 }
18 vendor/github.com/uptrace/bun/dialect/append.go generated vendored
@@ -25,24 +25,24 @@ func AppendBool(b []byte, v bool) []byte {
 	return append(b, "FALSE"...)
 }
 
-func AppendFloat32(b []byte, v float32) []byte {
-	return appendFloat(b, float64(v), 32)
+func AppendFloat32(b []byte, num float32) []byte {
+	return appendFloat(b, float64(num), 32)
 }
 
-func AppendFloat64(b []byte, v float64) []byte {
-	return appendFloat(b, v, 64)
+func AppendFloat64(b []byte, num float64) []byte {
+	return appendFloat(b, num, 64)
 }
 
-func appendFloat(b []byte, v float64, bitSize int) []byte {
+func appendFloat(b []byte, num float64, bitSize int) []byte {
 	switch {
-	case math.IsNaN(v):
+	case math.IsNaN(num):
 		return append(b, "'NaN'"...)
-	case math.IsInf(v, 1):
+	case math.IsInf(num, 1):
 		return append(b, "'Infinity'"...)
-	case math.IsInf(v, -1):
+	case math.IsInf(num, -1):
 		return append(b, "'-Infinity'"...)
 	default:
-		return strconv.AppendFloat(b, v, 'f', -1, bitSize)
+		return strconv.AppendFloat(b, num, 'f', -1, bitSize)
 	}
 }
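
Editor's aside: a sketch of what the renamed float appenders emit; the quoted 'NaN'/'Infinity' spellings are the literals accepted for special float values in SQL (illustrative program, not part of the diff):

package main

import (
	"fmt"
	"math"

	"github.com/uptrace/bun/dialect"
)

func main() {
	fmt.Println(string(dialect.AppendFloat64(nil, math.NaN())))  // 'NaN'
	fmt.Println(string(dialect.AppendFloat64(nil, math.Inf(1)))) // 'Infinity'
	fmt.Println(string(dialect.AppendFloat64(nil, 3.25)))        // 3.25
}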
3 vendor/github.com/uptrace/bun/dialect/feature/feature.go generated vendored
@@ -32,4 +32,7 @@
 	MSSavepoint
 	GeneratedIdentity
 	CompositeIn // ... WHERE (A,B) IN ((N, NN), (N, NN)...)
+	UpdateOrderLimit // UPDATE ... ORDER BY ... LIMIT ...
+	DeleteOrderLimit // DELETE ... ORDER BY ... LIMIT ...
+	DeleteReturning
 )
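
Editor's aside: a hedged sketch of how callers typically gate SQL generation on the new flags; it assumes bun's usual DB.HasFeature probe, which is not shown in this diff:

// db is an assumed *bun.DB handle.
if db.HasFeature(feature.DeleteReturning) {
	// safe to build: DELETE FROM ... RETURNING ...
}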
245 vendor/github.com/uptrace/bun/dialect/pgdialect/alter_table.go generated vendored Normal file
@@ -0,0 +1,245 @@
package pgdialect

import (
	"fmt"
	"strings"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
	"github.com/uptrace/bun/migrate/sqlschema"
	"github.com/uptrace/bun/schema"
)

func (d *Dialect) NewMigrator(db *bun.DB, schemaName string) sqlschema.Migrator {
	return &migrator{db: db, schemaName: schemaName, BaseMigrator: sqlschema.NewBaseMigrator(db)}
}

type migrator struct {
	*sqlschema.BaseMigrator

	db         *bun.DB
	schemaName string
}

var _ sqlschema.Migrator = (*migrator)(nil)

func (m *migrator) AppendSQL(b []byte, operation interface{}) (_ []byte, err error) {
	fmter := m.db.Formatter()

	// Append ALTER TABLE statement to the enclosed query bytes []byte.
	appendAlterTable := func(query []byte, tableName string) []byte {
		query = append(query, "ALTER TABLE "...)
		query = m.appendFQN(fmter, query, tableName)
		return append(query, " "...)
	}

	switch change := operation.(type) {
	case *migrate.CreateTableOp:
		return m.AppendCreateTable(b, change.Model)
	case *migrate.DropTableOp:
		return m.AppendDropTable(b, m.schemaName, change.TableName)
	case *migrate.RenameTableOp:
		b, err = m.renameTable(fmter, appendAlterTable(b, change.TableName), change)
	case *migrate.RenameColumnOp:
		b, err = m.renameColumn(fmter, appendAlterTable(b, change.TableName), change)
	case *migrate.AddColumnOp:
		b, err = m.addColumn(fmter, appendAlterTable(b, change.TableName), change)
	case *migrate.DropColumnOp:
		b, err = m.dropColumn(fmter, appendAlterTable(b, change.TableName), change)
	case *migrate.AddPrimaryKeyOp:
		b, err = m.addPrimaryKey(fmter, appendAlterTable(b, change.TableName), change.PrimaryKey)
	case *migrate.ChangePrimaryKeyOp:
		b, err = m.changePrimaryKey(fmter, appendAlterTable(b, change.TableName), change)
	case *migrate.DropPrimaryKeyOp:
		b, err = m.dropConstraint(fmter, appendAlterTable(b, change.TableName), change.PrimaryKey.Name)
	case *migrate.AddUniqueConstraintOp:
		b, err = m.addUnique(fmter, appendAlterTable(b, change.TableName), change)
	case *migrate.DropUniqueConstraintOp:
		b, err = m.dropConstraint(fmter, appendAlterTable(b, change.TableName), change.Unique.Name)
	case *migrate.ChangeColumnTypeOp:
		b, err = m.changeColumnType(fmter, appendAlterTable(b, change.TableName), change)
	case *migrate.AddForeignKeyOp:
		b, err = m.addForeignKey(fmter, appendAlterTable(b, change.TableName()), change)
	case *migrate.DropForeignKeyOp:
		b, err = m.dropConstraint(fmter, appendAlterTable(b, change.TableName()), change.ConstraintName)
	default:
		return nil, fmt.Errorf("append sql: unknown operation %T", change)
	}
	if err != nil {
		return nil, fmt.Errorf("append sql: %w", err)
	}
	return b, nil
}

func (m *migrator) appendFQN(fmter schema.Formatter, b []byte, tableName string) []byte {
	return fmter.AppendQuery(b, "?.?", bun.Ident(m.schemaName), bun.Ident(tableName))
}

func (m *migrator) renameTable(fmter schema.Formatter, b []byte, rename *migrate.RenameTableOp) (_ []byte, err error) {
	b = append(b, "RENAME TO "...)
	b = fmter.AppendName(b, rename.NewName)
	return b, nil
}

func (m *migrator) renameColumn(fmter schema.Formatter, b []byte, rename *migrate.RenameColumnOp) (_ []byte, err error) {
	b = append(b, "RENAME COLUMN "...)
	b = fmter.AppendName(b, rename.OldName)

	b = append(b, " TO "...)
	b = fmter.AppendName(b, rename.NewName)

	return b, nil
}

func (m *migrator) addColumn(fmter schema.Formatter, b []byte, add *migrate.AddColumnOp) (_ []byte, err error) {
	b = append(b, "ADD COLUMN "...)
	b = fmter.AppendName(b, add.ColumnName)
	b = append(b, " "...)

	b, err = add.Column.AppendQuery(fmter, b)
	if err != nil {
		return nil, err
	}

	if add.Column.GetDefaultValue() != "" {
		b = append(b, " DEFAULT "...)
		b = append(b, add.Column.GetDefaultValue()...)
		b = append(b, " "...)
	}

	if add.Column.GetIsIdentity() {
		b = appendGeneratedAsIdentity(b)
	}

	return b, nil
}

func (m *migrator) dropColumn(fmter schema.Formatter, b []byte, drop *migrate.DropColumnOp) (_ []byte, err error) {
	b = append(b, "DROP COLUMN "...)
	b = fmter.AppendName(b, drop.ColumnName)

	return b, nil
}

func (m *migrator) addPrimaryKey(fmter schema.Formatter, b []byte, pk sqlschema.PrimaryKey) (_ []byte, err error) {
	b = append(b, "ADD PRIMARY KEY ("...)
	b, _ = pk.Columns.AppendQuery(fmter, b)
	b = append(b, ")"...)

	return b, nil
}

func (m *migrator) changePrimaryKey(fmter schema.Formatter, b []byte, change *migrate.ChangePrimaryKeyOp) (_ []byte, err error) {
	b, _ = m.dropConstraint(fmter, b, change.Old.Name)
	b = append(b, ", "...)
	b, _ = m.addPrimaryKey(fmter, b, change.New)
	return b, nil
}

func (m *migrator) addUnique(fmter schema.Formatter, b []byte, change *migrate.AddUniqueConstraintOp) (_ []byte, err error) {
	b = append(b, "ADD CONSTRAINT "...)
	if change.Unique.Name != "" {
		b = fmter.AppendName(b, change.Unique.Name)
	} else {
		// Default naming scheme for unique constraints in Postgres is <table>_<column>_key
		b = fmter.AppendName(b, fmt.Sprintf("%s_%s_key", change.TableName, change.Unique.Columns))
	}
	b = append(b, " UNIQUE ("...)
	b, _ = change.Unique.Columns.AppendQuery(fmter, b)
	b = append(b, ")"...)

	return b, nil
}

func (m *migrator) dropConstraint(fmter schema.Formatter, b []byte, name string) (_ []byte, err error) {
	b = append(b, "DROP CONSTRAINT "...)
	b = fmter.AppendName(b, name)

	return b, nil
}

func (m *migrator) addForeignKey(fmter schema.Formatter, b []byte, add *migrate.AddForeignKeyOp) (_ []byte, err error) {
	b = append(b, "ADD CONSTRAINT "...)

	name := add.ConstraintName
	if name == "" {
		colRef := add.ForeignKey.From
		columns := strings.Join(colRef.Column.Split(), "_")
		name = fmt.Sprintf("%s_%s_fkey", colRef.TableName, columns)
	}
	b = fmter.AppendName(b, name)

	b = append(b, " FOREIGN KEY ("...)
	if b, err = add.ForeignKey.From.Column.AppendQuery(fmter, b); err != nil {
		return b, err
	}
	b = append(b, ")"...)

	b = append(b, " REFERENCES "...)
	b = m.appendFQN(fmter, b, add.ForeignKey.To.TableName)

	b = append(b, " ("...)
	if b, err = add.ForeignKey.To.Column.AppendQuery(fmter, b); err != nil {
		return b, err
	}
	b = append(b, ")"...)

	return b, nil
}

func (m *migrator) changeColumnType(fmter schema.Formatter, b []byte, colDef *migrate.ChangeColumnTypeOp) (_ []byte, err error) {
	// appendAlterColumn never re-assigns err, so there is no need to check for err != nil after calling it
	var i int
	appendAlterColumn := func() {
		if i > 0 {
			b = append(b, ", "...)
		}
		b = append(b, "ALTER COLUMN "...)
		b = fmter.AppendName(b, colDef.Column)
		i++
	}

	got, want := colDef.From, colDef.To

	inspector := m.db.Dialect().(sqlschema.InspectorDialect)
	if !inspector.CompareType(want, got) {
		appendAlterColumn()
		b = append(b, " SET DATA TYPE "...)
		if b, err = want.AppendQuery(fmter, b); err != nil {
			return b, err
		}
	}

	// Column must be declared NOT NULL before identity can be added.
	// Although PG can resolve the order of operations itself, we make this explicit in the query.
	if want.GetIsNullable() != got.GetIsNullable() {
		appendAlterColumn()
		if !want.GetIsNullable() {
			b = append(b, " SET NOT NULL"...)
		} else {
			b = append(b, " DROP NOT NULL"...)
		}
	}

	if want.GetIsIdentity() != got.GetIsIdentity() {
		appendAlterColumn()
		if !want.GetIsIdentity() {
			b = append(b, " DROP IDENTITY"...)
		} else {
			b = append(b, " ADD"...)
			b = appendGeneratedAsIdentity(b)
		}
	}

	if want.GetDefaultValue() != got.GetDefaultValue() {
		appendAlterColumn()
		if want.GetDefaultValue() == "" {
			b = append(b, " DROP DEFAULT"...)
		} else {
			b = append(b, " SET DEFAULT "...)
			b = append(b, want.GetDefaultValue()...)
		}
	}

	return b, nil
}
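
Editor's aside: a hypothetical sketch of driving the migrator above; the operation and field names come from this file, everything else (db handle, error handling) is assumed:

// db is an assumed *bun.DB connected with pgdialect.
m := pgdialect.New().NewMigrator(db, "public")
sql, err := m.AppendSQL(nil, &migrate.RenameColumnOp{
	TableName: "users",
	OldName:   "name",
	NewName:   "full_name",
})
_ = err
fmt.Println(string(sql)) // ALTER TABLE "public"."users" RENAME COLUMN "name" TO "full_name"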
31 vendor/github.com/uptrace/bun/dialect/pgdialect/array.go generated vendored
@@ -5,6 +5,7 @@
 	"database/sql/driver"
 	"encoding/hex"
 	"fmt"
+	"math"
 	"reflect"
 	"strconv"
 	"time"

@@ -159,7 +160,7 @@ func arrayAppend(fmter schema.Formatter, b []byte, v interface{}) []byte {
 	case int64:
 		return strconv.AppendInt(b, v, 10)
 	case float64:
-		return dialect.AppendFloat64(b, v)
+		return arrayAppendFloat64(b, v)
 	case bool:
 		return dialect.AppendBool(b, v)
 	case []byte:

@@ -167,7 +168,10 @@ func arrayAppend(fmter schema.Formatter, b []byte, v interface{}) []byte {
 	case string:
 		return arrayAppendString(b, v)
 	case time.Time:
-		return fmter.Dialect().AppendTime(b, v)
+		b = append(b, '"')
+		b = appendTime(b, v)
+		b = append(b, '"')
+		return b
 	default:
 		err := fmt.Errorf("pgdialect: can't append %T", v)
 		return dialect.AppendError(b, err)

@@ -288,7 +292,7 @@ func appendFloat64Slice(b []byte, floats []float64) []byte {
 
 	b = append(b, '{')
 	for _, n := range floats {
-		b = dialect.AppendFloat64(b, n)
+		b = arrayAppendFloat64(b, n)
 		b = append(b, ',')
 	}
 	if len(floats) > 0 {

@@ -302,6 +306,19 @@ func appendFloat64Slice(b []byte, floats []float64) []byte {
 	return b
 }
 
+func arrayAppendFloat64(b []byte, num float64) []byte {
+	switch {
+	case math.IsNaN(num):
+		return append(b, "NaN"...)
+	case math.IsInf(num, 1):
+		return append(b, "Infinity"...)
+	case math.IsInf(num, -1):
+		return append(b, "-Infinity"...)
+	default:
+		return strconv.AppendFloat(b, num, 'f', -1, 64)
+	}
+}
+
 func appendTimeSliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
 	ts := v.Convert(sliceTimeType).Interface().([]time.Time)
 	return appendTimeSlice(fmter, b, ts)

@@ -383,6 +400,10 @@ func arrayScanner(typ reflect.Type) schema.ScannerFunc {
 		}
 	}
 
+	if src == nil {
+		return nil
+	}
+
 	b, err := toBytes(src)
 	if err != nil {
 		return err

@@ -553,7 +574,7 @@ func scanFloat64SliceValue(dest reflect.Value, src interface{}) error {
 }
 
 func scanFloat64Slice(src interface{}) ([]float64, error) {
-	if src == -1 {
+	if src == nil {
 		return nil, nil
 	}
 

@@ -593,7 +614,7 @@ func toBytes(src interface{}) ([]byte, error) {
 	case []byte:
 		return src, nil
 	default:
-		return nil, fmt.Errorf("bun: got %T, wanted []byte or string", src)
+		return nil, fmt.Errorf("pgdialect: got %T, wanted []byte or string", src)
 	}
 }
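
Editor's aside: the new arrayAppendFloat64 drops the single-quote wrapping used by dialect.AppendFloat64 because quoting happens at the array-literal level in Postgres; an illustration (not part of the diff):

// Scalar literal vs. array-element spelling of special floats:
//   dialect.AppendFloat64(nil, math.NaN())  -> 'NaN'
//   arrayAppendFloat64(nil, math.NaN())     -> NaN
// so a []float64 renders as, e.g., '{3.25,NaN,Infinity}' rather than
// the invalid '{'NaN',...}' the old code path would have produced.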
13 vendor/github.com/uptrace/bun/dialect/pgdialect/dialect.go generated vendored
@@ -10,6 +10,7 @@
 	"github.com/uptrace/bun/dialect"
 	"github.com/uptrace/bun/dialect/feature"
 	"github.com/uptrace/bun/dialect/sqltype"
+	"github.com/uptrace/bun/migrate/sqlschema"
 	"github.com/uptrace/bun/schema"
 )
 
@@ -29,6 +30,10 @@ type Dialect struct {
 	features feature.Feature
 }
 
+var _ schema.Dialect = (*Dialect)(nil)
+var _ sqlschema.InspectorDialect = (*Dialect)(nil)
+var _ sqlschema.MigratorDialect = (*Dialect)(nil)
+
 func New() *Dialect {
 	d := new(Dialect)
 	d.tables = schema.NewTables(d)

@@ -48,7 +53,8 @@ func New() *Dialect {
 		feature.InsertOnConflict |
 		feature.SelectExists |
 		feature.GeneratedIdentity |
-		feature.CompositeIn
+		feature.CompositeIn |
+		feature.DeleteReturning
 	return d
 }

@@ -118,5 +124,10 @@ func (d *Dialect) AppendUint64(b []byte, n uint64) []byte {
 }
 
 func (d *Dialect) AppendSequence(b []byte, _ *schema.Table, _ *schema.Field) []byte {
+	return appendGeneratedAsIdentity(b)
+}
+
+// appendGeneratedAsIdentity appends GENERATED BY DEFAULT AS IDENTITY to the column definition.
+func appendGeneratedAsIdentity(b []byte) []byte {
 	return append(b, " GENERATED BY DEFAULT AS IDENTITY"...)
 }
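
Editor's aside: a trivial sketch of the DDL fragment AppendSequence now delegates to appendGeneratedAsIdentity for (illustrative only):

b := []byte(`"id" BIGINT`)
b = append(b, " GENERATED BY DEFAULT AS IDENTITY"...) // the exact bytes appendGeneratedAsIdentity adds
fmt.Println(string(b)) // "id" BIGINT GENERATED BY DEFAULT AS IDENTITY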
297 vendor/github.com/uptrace/bun/dialect/pgdialect/inspector.go generated vendored Normal file
@@ -0,0 +1,297 @@
package pgdialect

import (
	"context"
	"strings"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate/sqlschema"
	orderedmap "github.com/wk8/go-ordered-map/v2"
)

type (
	Schema = sqlschema.BaseDatabase
	Table  = sqlschema.BaseTable
	Column = sqlschema.BaseColumn
)

func (d *Dialect) NewInspector(db *bun.DB, options ...sqlschema.InspectorOption) sqlschema.Inspector {
	return newInspector(db, options...)
}

type Inspector struct {
	sqlschema.InspectorConfig
	db *bun.DB
}

var _ sqlschema.Inspector = (*Inspector)(nil)

func newInspector(db *bun.DB, options ...sqlschema.InspectorOption) *Inspector {
	i := &Inspector{db: db}
	sqlschema.ApplyInspectorOptions(&i.InspectorConfig, options...)
	return i
}

func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
	dbSchema := Schema{
		Tables:      orderedmap.New[string, sqlschema.Table](),
		ForeignKeys: make(map[sqlschema.ForeignKey]string),
	}

	exclude := in.ExcludeTables
	if len(exclude) == 0 {
		// Avoid getting NOT IN (NULL) if bun.In() is called with an empty slice.
		exclude = []string{""}
	}

	var tables []*InformationSchemaTable
	if err := in.db.NewRaw(sqlInspectTables, in.SchemaName, bun.In(exclude)).Scan(ctx, &tables); err != nil {
		return dbSchema, err
	}

	var fks []*ForeignKey
	if err := in.db.NewRaw(sqlInspectForeignKeys, in.SchemaName, bun.In(exclude), bun.In(exclude)).Scan(ctx, &fks); err != nil {
		return dbSchema, err
	}
	dbSchema.ForeignKeys = make(map[sqlschema.ForeignKey]string, len(fks))

	for _, table := range tables {
		var columns []*InformationSchemaColumn
		if err := in.db.NewRaw(sqlInspectColumnsQuery, table.Schema, table.Name).Scan(ctx, &columns); err != nil {
			return dbSchema, err
		}

		colDefs := orderedmap.New[string, sqlschema.Column]()
		uniqueGroups := make(map[string][]string)

		for _, c := range columns {
			def := c.Default
			if c.IsSerial || c.IsIdentity {
				def = ""
			} else if !c.IsDefaultLiteral {
				def = strings.ToLower(def)
			}

			colDefs.Set(c.Name, &Column{
				Name:            c.Name,
				SQLType:         c.DataType,
				VarcharLen:      c.VarcharLen,
				DefaultValue:    def,
				IsNullable:      c.IsNullable,
				IsAutoIncrement: c.IsSerial,
				IsIdentity:      c.IsIdentity,
			})

			for _, group := range c.UniqueGroups {
				uniqueGroups[group] = append(uniqueGroups[group], c.Name)
			}
		}

		var unique []sqlschema.Unique
		for name, columns := range uniqueGroups {
			unique = append(unique, sqlschema.Unique{
				Name:    name,
				Columns: sqlschema.NewColumns(columns...),
			})
		}

		var pk *sqlschema.PrimaryKey
		if len(table.PrimaryKey.Columns) > 0 {
			pk = &sqlschema.PrimaryKey{
				Name:    table.PrimaryKey.ConstraintName,
				Columns: sqlschema.NewColumns(table.PrimaryKey.Columns...),
			}
		}

		dbSchema.Tables.Set(table.Name, &Table{
			Schema:            table.Schema,
			Name:              table.Name,
			Columns:           colDefs,
			PrimaryKey:        pk,
			UniqueConstraints: unique,
		})
	}

	for _, fk := range fks {
		dbSchema.ForeignKeys[sqlschema.ForeignKey{
			From: sqlschema.NewColumnReference(fk.SourceTable, fk.SourceColumns...),
			To:   sqlschema.NewColumnReference(fk.TargetTable, fk.TargetColumns...),
		}] = fk.ConstraintName
	}
	return dbSchema, nil
}

type InformationSchemaTable struct {
	Schema     string     `bun:"table_schema,pk"`
	Name       string     `bun:"table_name,pk"`
	PrimaryKey PrimaryKey `bun:"embed:primary_key_"`

	Columns []*InformationSchemaColumn `bun:"rel:has-many,join:table_schema=table_schema,join:table_name=table_name"`
}

type InformationSchemaColumn struct {
	Schema           string   `bun:"table_schema"`
	Table            string   `bun:"table_name"`
	Name             string   `bun:"column_name"`
	DataType         string   `bun:"data_type"`
	VarcharLen       int      `bun:"varchar_len"`
	IsArray          bool     `bun:"is_array"`
	ArrayDims        int      `bun:"array_dims"`
	Default          string   `bun:"default"`
	IsDefaultLiteral bool     `bun:"default_is_literal_expr"`
	IsIdentity       bool     `bun:"is_identity"`
	IndentityType    string   `bun:"identity_type"`
	IsSerial         bool     `bun:"is_serial"`
	IsNullable       bool     `bun:"is_nullable"`
	UniqueGroups     []string `bun:"unique_groups,array"`
}

type ForeignKey struct {
	ConstraintName string   `bun:"constraint_name"`
	SourceSchema   string   `bun:"schema_name"`
	SourceTable    string   `bun:"table_name"`
	SourceColumns  []string `bun:"columns,array"`
	TargetSchema   string   `bun:"target_schema"`
	TargetTable    string   `bun:"target_table"`
	TargetColumns  []string `bun:"target_columns,array"`
}

type PrimaryKey struct {
	ConstraintName string   `bun:"name"`
	Columns        []string `bun:"columns,array"`
}

const (
	// sqlInspectTables retrieves all user-defined tables in the selected schema.
	// Pass bun.In([]string{...}) to exclude tables from this inspection or bun.In([]string{''}) to include all results.
	sqlInspectTables = `
SELECT
	"t".table_schema,
	"t".table_name,
	pk.name AS primary_key_name,
	pk.columns AS primary_key_columns
FROM information_schema.tables "t"
	LEFT JOIN (
		SELECT i.indrelid, "idx".relname AS "name", ARRAY_AGG("a".attname) AS "columns"
		FROM pg_index i
			JOIN pg_attribute "a"
				ON "a".attrelid = i.indrelid
				AND "a".attnum = ANY("i".indkey)
				AND i.indisprimary
			JOIN pg_class "idx" ON i.indexrelid = "idx".oid
		GROUP BY 1, 2
	) pk
		ON ("t".table_schema || '.' || "t".table_name)::regclass = pk.indrelid
WHERE table_type = 'BASE TABLE'
	AND "t".table_schema = ?
	AND "t".table_schema NOT LIKE 'pg_%'
	AND "table_name" NOT IN (?)
ORDER BY "t".table_schema, "t".table_name
`

	// sqlInspectColumnsQuery retrieves column definitions for the specified table.
	// Unlike sqlInspectTables and sqlInspectSchema, it should be passed to bun.NewRaw
	// with additional args for table_schema and table_name.
	sqlInspectColumnsQuery = `
SELECT
	"c".table_schema,
	"c".table_name,
	"c".column_name,
	"c".data_type,
	"c".character_maximum_length::integer AS varchar_len,
	"c".data_type = 'ARRAY' AS is_array,
	COALESCE("c".array_dims, 0) AS array_dims,
	CASE
		WHEN "c".column_default ~ '^''.*''::.*$' THEN substring("c".column_default FROM '^''(.*)''::.*$')
		ELSE "c".column_default
	END AS "default",
	"c".column_default ~ '^''.*''::.*$' OR "c".column_default ~ '^[0-9\.]+$' AS default_is_literal_expr,
	"c".is_identity = 'YES' AS is_identity,
	"c".column_default = format('nextval(''%s_%s_seq''::regclass)', "c".table_name, "c".column_name) AS is_serial,
	COALESCE("c".identity_type, '') AS identity_type,
	"c".is_nullable = 'YES' AS is_nullable,
	"c"."unique_groups" AS unique_groups
FROM (
	SELECT
		"table_schema",
		"table_name",
		"column_name",
		"c".data_type,
		"c".character_maximum_length,
		"c".column_default,
		"c".is_identity,
		"c".is_nullable,
		att.array_dims,
		att.identity_type,
		att."unique_groups",
		att."constraint_type"
	FROM information_schema.columns "c"
		LEFT JOIN (
			SELECT
				s.nspname AS "table_schema",
				"t".relname AS "table_name",
				"c".attname AS "column_name",
				"c".attndims AS array_dims,
				"c".attidentity AS identity_type,
				ARRAY_AGG(con.conname) FILTER (WHERE con.contype = 'u') AS "unique_groups",
				ARRAY_AGG(con.contype) AS "constraint_type"
			FROM (
				SELECT
					conname,
					contype,
					connamespace,
					conrelid,
					conrelid AS attrelid,
					UNNEST(conkey) AS attnum
				FROM pg_constraint
			) con
				LEFT JOIN pg_attribute "c" USING (attrelid, attnum)
				LEFT JOIN pg_namespace s ON s.oid = con.connamespace
				LEFT JOIN pg_class "t" ON "t".oid = con.conrelid
			GROUP BY 1, 2, 3, 4, 5
		) att USING ("table_schema", "table_name", "column_name")
	) "c"
WHERE "table_schema" = ? AND "table_name" = ?
ORDER BY "table_schema", "table_name", "column_name"
`

	// sqlInspectForeignKeys gets FK definitions for user-defined tables.
	// Pass bun.In([]string{...}) to exclude tables from this inspection or bun.In([]string{''}) to include all results.
	sqlInspectForeignKeys = `
WITH
	"schemas" AS (
		SELECT oid, nspname
		FROM pg_namespace
	),
	"tables" AS (
		SELECT oid, relnamespace, relname, relkind
		FROM pg_class
	),
	"columns" AS (
		SELECT attrelid, attname, attnum
		FROM pg_attribute
		WHERE attisdropped = false
	)
SELECT DISTINCT
	co.conname AS "constraint_name",
	ss.nspname AS schema_name,
	s.relname AS "table_name",
	ARRAY_AGG(sc.attname) AS "columns",
	ts.nspname AS target_schema,
	"t".relname AS target_table,
	ARRAY_AGG(tc.attname) AS target_columns
FROM pg_constraint co
	LEFT JOIN "tables" s ON s.oid = co.conrelid
	LEFT JOIN "schemas" ss ON ss.oid = s.relnamespace
	LEFT JOIN "columns" sc ON sc.attrelid = s.oid AND sc.attnum = ANY(co.conkey)
	LEFT JOIN "tables" t ON t.oid = co.confrelid
	LEFT JOIN "schemas" ts ON ts.oid = "t".relnamespace
	LEFT JOIN "columns" tc ON tc.attrelid = "t".oid AND tc.attnum = ANY(co.confkey)
WHERE co.contype = 'f'
	AND co.conrelid IN (SELECT oid FROM pg_class WHERE relkind = 'r')
	AND ARRAY_POSITION(co.conkey, sc.attnum) = ARRAY_POSITION(co.confkey, tc.attnum)
	AND ss.nspname = ?
	AND s.relname NOT IN (?) AND "t".relname NOT IN (?)
GROUP BY "constraint_name", "schema_name", "table_name", target_schema, target_table
`
)
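
Editor's aside: a hedged sketch of how the inspector might be used; the exact sqlschema option constructor is an assumption, not shown in this diff:

// ctx and db (a *bun.DB with pgdialect) are assumed.
inspector := pgdialect.New().NewInspector(db, sqlschema.WithSchemaName("public")) // option name assumed
state, err := inspector.Inspect(ctx)
if err != nil {
	// handle error
}
_ = state // tables with columns, PKs, unique constraints, and FKs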
78 vendor/github.com/uptrace/bun/dialect/pgdialect/sqltype.go generated vendored
@@ -5,18 +5,22 @@
 	"encoding/json"
 	"net"
 	"reflect"
+	"strings"
 
 	"github.com/uptrace/bun/dialect/sqltype"
+	"github.com/uptrace/bun/migrate/sqlschema"
 	"github.com/uptrace/bun/schema"
 )
 
 const (
 	// Date / Time
-	pgTypeTimestampTz = "TIMESTAMPTZ" // Timestamp with a time zone
+	pgTypeTimestamp       = "TIMESTAMP"                // Timestamp
+	pgTypeTimestampWithTz = "TIMESTAMP WITH TIME ZONE" // Timestamp with a time zone
+	pgTypeTimestampTz     = "TIMESTAMPTZ"              // Timestamp with a time zone (alias)
 	pgTypeDate     = "DATE"                // Date
 	pgTypeTime     = "TIME"                // Time without a time zone
 	pgTypeTimeTz   = "TIME WITH TIME ZONE" // Time with a time zone
-	pgTypeInterval = "INTERVAL"            // Time Interval
+	pgTypeInterval = "INTERVAL"            // Time interval
 
 	// Network Addresses
 	pgTypeInet = "INET" // IPv4 or IPv6 hosts and networks

@@ -28,6 +32,13 @@
 	pgTypeSerial    = "SERIAL"    // 4 byte autoincrementing integer
 	pgTypeBigSerial = "BIGSERIAL" // 8 byte autoincrementing integer
 
+	// Character Types
+	pgTypeChar             = "CHAR"              // fixed length string (blank padded)
+	pgTypeCharacter        = "CHARACTER"         // alias for CHAR
+	pgTypeText             = "TEXT"              // variable length string without limit
+	pgTypeVarchar          = "VARCHAR"           // variable length string with optional limit
+	pgTypeCharacterVarying = "CHARACTER VARYING" // alias for VARCHAR
+
 	// Binary Data Types
 	pgTypeBytea = "BYTEA" // binary string
 )

@@ -43,6 +54,10 @@ func (d *Dialect) DefaultVarcharLen() int {
 	return 0
 }
 
+func (d *Dialect) DefaultSchema() string {
+	return "public"
+}
+
 func fieldSQLType(field *schema.Field) string {
 	if field.UserSQLType != "" {
 		return field.UserSQLType

@@ -103,3 +118,62 @@ func sqlType(typ reflect.Type) string {
 
 	return sqlType
 }
+
+var (
+	char        = newAliases(pgTypeChar, pgTypeCharacter)
+	varchar     = newAliases(pgTypeVarchar, pgTypeCharacterVarying)
+	timestampTz = newAliases(sqltype.Timestamp, pgTypeTimestampTz, pgTypeTimestampWithTz)
+)
+
+func (d *Dialect) CompareType(col1, col2 sqlschema.Column) bool {
+	typ1, typ2 := strings.ToUpper(col1.GetSQLType()), strings.ToUpper(col2.GetSQLType())
+
+	if typ1 == typ2 {
+		return checkVarcharLen(col1, col2, d.DefaultVarcharLen())
+	}
+
+	switch {
+	case char.IsAlias(typ1) && char.IsAlias(typ2):
+		return checkVarcharLen(col1, col2, d.DefaultVarcharLen())
+	case varchar.IsAlias(typ1) && varchar.IsAlias(typ2):
+		return checkVarcharLen(col1, col2, d.DefaultVarcharLen())
+	case timestampTz.IsAlias(typ1) && timestampTz.IsAlias(typ2):
+		return true
+	}
+	return false
+}
+
+// checkVarcharLen returns true if columns have the same VarcharLen, or
+// if one specifies no VarcharLen and the other one has the default length for pgdialect.
+// We assume that the types are otherwise equivalent and that any non-character column
+// would have VarcharLen == 0.
+func checkVarcharLen(col1, col2 sqlschema.Column, defaultLen int) bool {
+	vl1, vl2 := col1.GetVarcharLen(), col2.GetVarcharLen()
+
+	if vl1 == vl2 {
+		return true
+	}
+
+	if (vl1 == 0 && vl2 == defaultLen) || (vl1 == defaultLen && vl2 == 0) {
+		return true
+	}
+	return false
+}
+
+// typeAlias defines aliases for common data types. It is a lightweight string set implementation.
+type typeAlias map[string]struct{}
+
+// IsAlias reports whether typ is one of the set's aliases.
+func (t typeAlias) IsAlias(typ string) bool {
+	_, ok := t[typ]
+	return ok
+}
+
+// newAliases creates a set of aliases.
+func newAliases(aliases ...string) typeAlias {
+	types := make(typeAlias)
+	for _, a := range aliases {
+		types[a] = struct{}{}
+	}
+	return types
+}
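
Editor's aside: a sketch of the alias-set semantics defined above (values illustrative; uses the unexported helpers and constants from this file):

ts := newAliases(pgTypeTimestampTz, pgTypeTimestampWithTz)
fmt.Println(ts.IsAlias("TIMESTAMPTZ"))              // true
fmt.Println(ts.IsAlias("TIMESTAMP WITH TIME ZONE")) // true: same underlying type
fmt.Println(ts.IsAlias("DATE"))                     // false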
2 vendor/github.com/uptrace/bun/dialect/pgdialect/version.go generated vendored
@@ -2,5 +2,5 @@
 
 // Version is the current release version.
 func Version() string {
-	return "1.2.5"
+	return "1.2.6"
 }
14 vendor/github.com/uptrace/bun/dialect/sqlitedialect/dialect.go generated vendored
@@ -40,7 +40,8 @@ func New() *Dialect {
 		feature.TableNotExists |
 		feature.SelectExists |
 		feature.AutoIncrement |
-		feature.CompositeIn
+		feature.CompositeIn |
+		feature.DeleteReturning
 	return d
 }

@@ -96,9 +97,13 @@ func (d *Dialect) DefaultVarcharLen() int {
 // AUTOINCREMENT is only valid for INTEGER PRIMARY KEY, and this method will be a noop for other columns.
 //
 // Because this is a valid construct:
+//
 //	CREATE TABLE ("id" INTEGER PRIMARY KEY AUTOINCREMENT);
+//
 // and this is not:
+//
 //	CREATE TABLE ("id" INTEGER AUTOINCREMENT, PRIMARY KEY ("id"));
+//
 // AppendSequence adds a primary key constraint as a *side-effect*. Callers should expect it to avoid building invalid SQL.
 // SQLite also [does not support] AUTOINCREMENT column in composite primary keys.
 //

@@ -111,6 +116,13 @@ func (d *Dialect) AppendSequence(b []byte, table *schema.Table, field *schema.Fi
 	return b
 }
 
+// DefaultSchema is the "schema-name" of the main database.
+// The details might differ from other dialects, but for all intents and purposes
+// "main" is the default schema in an SQLite database.
+func (d *Dialect) DefaultSchema() string {
+	return "main"
+}
+
 func fieldSQLType(field *schema.Field) string {
 	switch field.DiscoveredSQLType {
 	case sqltype.SmallInt, sqltype.BigInt:
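
Editor's aside: with this hunk both bundled dialects answer DefaultSchema; a one-liner sketch (illustrative):

fmt.Println(sqlitedialect.New().DefaultSchema()) // main
fmt.Println(pgdialect.New().DefaultSchema())     // public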
2 vendor/github.com/uptrace/bun/dialect/sqlitedialect/version.go generated vendored
@@ -2,5 +2,5 @@
 
 // Version is the current release version.
 func Version() string {
-	return "1.2.5"
+	return "1.2.6"
 }
6 vendor/github.com/uptrace/bun/internal/util.go generated vendored
@@ -79,3 +79,9 @@ func indirectNil(v reflect.Value) reflect.Value {
 	}
 	return v
 }
+
+// MakeQueryBytes returns a zero-length byte slice with a capacity of 4096.
+func MakeQueryBytes() []byte {
+	// TODO: make this configurable?
+	return make([]byte, 0, 4096)
+}
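
Editor's aside: this is the helper the db.go hunk earlier in this commit switched to; a sketch of what it returns (illustrative):

buf := internal.MakeQueryBytes()
fmt.Println(len(buf), cap(buf)) // 0 4096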
429 vendor/github.com/uptrace/bun/migrate/auto.go generated vendored Normal file
@@ -0,0 +1,429 @@
package migrate

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/internal"
	"github.com/uptrace/bun/migrate/sqlschema"
	"github.com/uptrace/bun/schema"
)

type AutoMigratorOption func(m *AutoMigrator)

// WithModel adds a bun.Model to the scope of migrations.
func WithModel(models ...interface{}) AutoMigratorOption {
	return func(m *AutoMigrator) {
		m.includeModels = append(m.includeModels, models...)
	}
}

// WithExcludeTable tells the AutoMigrator to ignore a table in the database.
// This prevents AutoMigrator from dropping tables which may exist in the schema
// but which are not used by the application.
//
// Do not exclude tables included via WithModel, as BunModelInspector ignores this setting.
func WithExcludeTable(tables ...string) AutoMigratorOption {
	return func(m *AutoMigrator) {
		m.excludeTables = append(m.excludeTables, tables...)
	}
}

// WithSchemaName changes the default database schema to migrate objects in.
func WithSchemaName(schemaName string) AutoMigratorOption {
	return func(m *AutoMigrator) {
		m.schemaName = schemaName
	}
}

// WithTableNameAuto overrides default migrations table name.
func WithTableNameAuto(table string) AutoMigratorOption {
	return func(m *AutoMigrator) {
		m.table = table
		m.migratorOpts = append(m.migratorOpts, WithTableName(table))
	}
}

// WithLocksTableNameAuto overrides default migration locks table name.
func WithLocksTableNameAuto(table string) AutoMigratorOption {
	return func(m *AutoMigrator) {
		m.locksTable = table
		m.migratorOpts = append(m.migratorOpts, WithLocksTableName(table))
	}
}

// WithMarkAppliedOnSuccessAuto sets the migrator to only mark migrations as applied/unapplied
// when their up/down is successful.
func WithMarkAppliedOnSuccessAuto(enabled bool) AutoMigratorOption {
	return func(m *AutoMigrator) {
		m.migratorOpts = append(m.migratorOpts, WithMarkAppliedOnSuccess(enabled))
	}
}

// WithMigrationsDirectoryAuto overrides the default directory for migration files.
func WithMigrationsDirectoryAuto(directory string) AutoMigratorOption {
	return func(m *AutoMigrator) {
		m.migrationsOpts = append(m.migrationsOpts, WithMigrationsDirectory(directory))
	}
}

// AutoMigrator performs automated schema migrations.
//
// It is designed to be a drop-in replacement for some Migrator functionality and supports all existing
// configuration options.
// Similarly to Migrator, it has methods to create SQL migrations, write them to a file, and apply them.
// Unlike Migrator, it detects the differences between the state defined by bun models and the current
// database schema automatically.
//
// Usage:
//  1. Generate migrations and apply them at once with AutoMigrator.Migrate().
//  2. Create up- and down-SQL migration files and apply migrations using Migrator.Migrate().
//
// While both methods produce complete, reversible migrations (with entries in the database
// and SQL migration files), prefer creating migrations and applying them separately for
// any non-trivial cases to ensure AutoMigrator detects expected changes correctly.
//
// Limitations:
//   - AutoMigrator only supports a subset of the possible ALTER TABLE modifications.
//   - Some changes are not automatically reversible. For example, you would need to manually
//     add a CREATE TABLE query to the .down migration file to revert a DROP TABLE migration.
//   - Does not validate most dialect-specific constraints. For example, when changing column
//     data type, make sure the data can be auto-cast to the new type.
//   - Due to how the schema-state diff is calculated, it is not possible to rename a table and
//     modify any of its columns' _data type_ in a single run. This will cause the AutoMigrator
//     to drop and re-create the table under a different name; it is better to apply this change in 2 steps.
//     Renaming a table and renaming its columns at the same time is possible.
//   - Renaming a table/column to an existing name, i.e. like this [A->B] [B->C], is not possible due to how
//     AutoMigrator distinguishes "renamed" and "unchanged" columns.
//
// Dialect must implement both sqlschema.Inspector and sqlschema.Migrator to be used with AutoMigrator.
type AutoMigrator struct {
	db *bun.DB

	// dbInspector creates the current state for the target database.
	dbInspector sqlschema.Inspector

	// modelInspector creates the desired state based on the model definitions.
	modelInspector sqlschema.Inspector

	// dbMigrator executes ALTER TABLE queries.
	dbMigrator sqlschema.Migrator

	table      string // Migrations table (excluded from database inspection)
	locksTable string // Migration locks table (excluded from database inspection)

	// schemaName is the database schema considered for migration.
	schemaName string

	// includeModels defines the migration scope.
	includeModels []interface{}

	// excludeTables are excluded from database inspection.
	excludeTables []string

	// diffOpts are passed to the detector constructor.
	diffOpts []diffOption

	// migratorOpts are passed to the Migrator constructor.
	migratorOpts []MigratorOption

	// migrationsOpts are passed to the Migrations constructor.
	migrationsOpts []MigrationsOption
}

func NewAutoMigrator(db *bun.DB, opts ...AutoMigratorOption) (*AutoMigrator, error) {
	am := &AutoMigrator{
		db:         db,
		table:      defaultTable,
		locksTable: defaultLocksTable,
		schemaName: db.Dialect().DefaultSchema(),
	}

	for _, opt := range opts {
		opt(am)
	}
	am.excludeTables = append(am.excludeTables, am.table, am.locksTable)

	dbInspector, err := sqlschema.NewInspector(db, sqlschema.WithSchemaName(am.schemaName), sqlschema.WithExcludeTables(am.excludeTables...))
	if err != nil {
		return nil, err
	}
	am.dbInspector = dbInspector
	am.diffOpts = append(am.diffOpts, withCompareTypeFunc(db.Dialect().(sqlschema.InspectorDialect).CompareType))

	dbMigrator, err := sqlschema.NewMigrator(db, am.schemaName)
	if err != nil {
		return nil, err
	}
	am.dbMigrator = dbMigrator

	tables := schema.NewTables(db.Dialect())
	tables.Register(am.includeModels...)
	am.modelInspector = sqlschema.NewBunModelInspector(tables, sqlschema.WithSchemaName(am.schemaName))

	return am, nil
}
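
As a point of reference for the API above, here is a minimal usage sketch (not taken from bun's docs): the Book model and the DSN are hypothetical placeholders, and Postgres is chosen because its dialect is known to implement the inspector/migrator interfaces that AutoMigrator requires.

package main

import (
	"context"
	"database/sql"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/pgdialect"
	"github.com/uptrace/bun/driver/pgdriver"
	"github.com/uptrace/bun/migrate"
)

// Book is a hypothetical model used only for illustration.
type Book struct {
	ID    int64  `bun:",pk,autoincrement"`
	Title string `bun:",notnull"`
}

func main() {
	// Placeholder DSN; adjust for your environment.
	dsn := "postgres://user:pass@localhost:5432/app?sslmode=disable"
	sqldb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(dsn)))
	db := bun.NewDB(sqldb, pgdialect.New())

	// Scope migrations to the Book model.
	am, err := migrate.NewAutoMigrator(db, migrate.WithModel((*Book)(nil)))
	if err != nil {
		panic(err)
	}
	// Generate migrations from the model/schema diff and apply them at once.
	if _, err := am.Migrate(context.Background()); err != nil {
		panic(err)
	}
}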

func (am *AutoMigrator) plan(ctx context.Context) (*changeset, error) {
	var err error

	got, err := am.dbInspector.Inspect(ctx)
	if err != nil {
		return nil, err
	}

	want, err := am.modelInspector.Inspect(ctx)
	if err != nil {
		return nil, err
	}

	changes := diff(got, want, am.diffOpts...)
	if err := changes.ResolveDependencies(); err != nil {
		return nil, fmt.Errorf("plan migrations: %w", err)
	}
	return changes, nil
}

// Migrate writes required changes to a new migration file and runs the migration.
// This will create an entry in the migrations table, making it possible to revert
// the changes with Migrator.Rollback(). MigrationOptions are passed on to Migrator.Migrate().
func (am *AutoMigrator) Migrate(ctx context.Context, opts ...MigrationOption) (*MigrationGroup, error) {
	migrations, _, err := am.createSQLMigrations(ctx, false)
	if err != nil {
		return nil, fmt.Errorf("auto migrate: %w", err)
	}

	migrator := NewMigrator(am.db, migrations, am.migratorOpts...)
	if err := migrator.Init(ctx); err != nil {
		return nil, fmt.Errorf("auto migrate: %w", err)
	}

	group, err := migrator.Migrate(ctx, opts...)
	if err != nil {
		return nil, fmt.Errorf("auto migrate: %w", err)
	}
	return group, nil
}

// CreateSQLMigrations writes required changes to a new migration file.
// Use migrate.Migrator to apply the generated migrations.
func (am *AutoMigrator) CreateSQLMigrations(ctx context.Context) ([]*MigrationFile, error) {
	_, files, err := am.createSQLMigrations(ctx, true)
	return files, err
}

// CreateTxSQLMigrations writes required changes to a new migration file, making sure they will be executed
// in a transaction when applied. Use migrate.Migrator to apply the generated migrations.
func (am *AutoMigrator) CreateTxSQLMigrations(ctx context.Context) ([]*MigrationFile, error) {
	_, files, err := am.createSQLMigrations(ctx, false)
	return files, err
}

func (am *AutoMigrator) createSQLMigrations(ctx context.Context, transactional bool) (*Migrations, []*MigrationFile, error) {
	changes, err := am.plan(ctx)
	if err != nil {
		return nil, nil, fmt.Errorf("create sql migrations: %w", err)
	}

	name, _ := genMigrationName(am.schemaName + "_auto")
	migrations := NewMigrations(am.migrationsOpts...)
	migrations.Add(Migration{
		Name:    name,
		Up:      changes.Up(am.dbMigrator),
		Down:    changes.Down(am.dbMigrator),
		Comment: "Changes detected by bun.AutoMigrator",
	})

	// Append .tx.up.sql or .up.sql to the migration name, depending on whether it should be transactional.
	fname := func(direction string) string {
		return name + map[bool]string{true: ".tx.", false: "."}[transactional] + direction + ".sql"
	}

	up, err := am.createSQL(ctx, migrations, fname("up"), changes, transactional)
	if err != nil {
		return nil, nil, fmt.Errorf("create sql migration up: %w", err)
	}

	down, err := am.createSQL(ctx, migrations, fname("down"), changes.GetReverse(), transactional)
	if err != nil {
		return nil, nil, fmt.Errorf("create sql migration down: %w", err)
	}
	return migrations, []*MigrationFile{up, down}, nil
}

func (am *AutoMigrator) createSQL(_ context.Context, migrations *Migrations, fname string, changes *changeset, transactional bool) (*MigrationFile, error) {
	var buf bytes.Buffer

	if transactional {
		buf.WriteString("SET statement_timeout = 0;")
	}

	if err := changes.WriteTo(&buf, am.dbMigrator); err != nil {
		return nil, err
	}
	content := buf.Bytes()

	fpath := filepath.Join(migrations.getDirectory(), fname)
	if err := os.WriteFile(fpath, content, 0o644); err != nil {
		return nil, err
	}

	mf := &MigrationFile{
		Name:    fname,
		Path:    fpath,
		Content: string(content),
	}
	return mf, nil
}

// Func creates a MigrationFunc that applies all operations in the changeset.
func (c *changeset) Func(m sqlschema.Migrator) MigrationFunc {
	return func(ctx context.Context, db *bun.DB) error {
		return c.apply(ctx, db, m)
	}
}

// GetReverse returns a new changeset with each operation in it "reversed" and in reverse order.
func (c *changeset) GetReverse() *changeset {
	var reverse changeset
	for i := len(c.operations) - 1; i >= 0; i-- {
		reverse.Add(c.operations[i].GetReverse())
	}
	return &reverse
}

// Up is syntactic sugar.
func (c *changeset) Up(m sqlschema.Migrator) MigrationFunc {
	return c.Func(m)
}

// Down is syntactic sugar.
func (c *changeset) Down(m sqlschema.Migrator) MigrationFunc {
	return c.GetReverse().Func(m)
}

// apply generates SQL for each operation and executes it.
func (c *changeset) apply(ctx context.Context, db *bun.DB, m sqlschema.Migrator) error {
	if len(c.operations) == 0 {
		return nil
	}

	for _, op := range c.operations {
		if _, isComment := op.(*comment); isComment {
			continue
		}

		b := internal.MakeQueryBytes()
		b, err := m.AppendSQL(b, op)
		if err != nil {
			return fmt.Errorf("apply changes: %w", err)
		}

		query := internal.String(b)
		if _, err = db.ExecContext(ctx, query); err != nil {
			return fmt.Errorf("apply changes: %w", err)
		}
	}
	return nil
}

func (c *changeset) WriteTo(w io.Writer, m sqlschema.Migrator) error {
	var err error

	b := internal.MakeQueryBytes()
	for _, op := range c.operations {
		if c, isComment := op.(*comment); isComment {
			b = append(b, "/*\n"...)
			b = append(b, *c...)
			b = append(b, "\n*/"...)
			continue
		}

		b, err = m.AppendSQL(b, op)
		if err != nil {
			return fmt.Errorf("write changeset: %w", err)
		}
		b = append(b, ";\n"...)
	}
	if _, err := w.Write(b); err != nil {
		return fmt.Errorf("write changeset: %w", err)
	}
	return nil
}

func (c *changeset) ResolveDependencies() error {
	if len(c.operations) <= 1 {
		return nil
	}

	const (
		unvisited = iota
		current
		visited
	)

	status := make(map[Operation]int, len(c.operations))
	for _, op := range c.operations {
		status[op] = unvisited
	}

	var resolved []Operation
	var nextOp Operation
	var visit func(op Operation) error

	next := func() bool {
		for op, s := range status {
			if s == unvisited {
				nextOp = op
				return true
			}
		}
		return false
	}

	// visit iterates over c.operations until it finds all operations that depend on the current one
	// or runs into a circular dependency, in which case it will return an error.
	visit = func(op Operation) error {
		switch status[op] {
		case visited:
			return nil
		case current:
			// TODO: add details (circle) to the error message
			return errors.New("detected circular dependency")
		}

		status[op] = current

		for _, another := range c.operations {
			if dop, hasDeps := another.(interface {
				DependsOn(Operation) bool
			}); another == op || !hasDeps || !dop.DependsOn(op) {
				continue
			}
			if err := visit(another); err != nil {
				return err
			}
		}

		status[op] = visited

		// Any dependent nodes would've already been added to the list by now, so we prepend.
		resolved = append([]Operation{op}, resolved...)
		return nil
	}

	for next() {
		if err := visit(nextOp); err != nil {
			return err
		}
	}

	c.operations = resolved
	return nil
}
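
ResolveDependencies is a depth-first topological sort over the optional DependsOn relation: every operation that depends on the node being visited is resolved first, and the node is then prepended, so it ends up ahead of its dependents. A self-contained toy sketch of the same idea (none of these names are bun APIs):

package main

import (
	"errors"
	"fmt"
)

// Toy stand-ins for migrate.Operation: dropFK must run before dropTable,
// which is expressed as dropTable depending on dropFK.
type op struct {
	name string
	deps []*op // operations this one depends on
}

func (o *op) dependsOn(other *op) bool {
	for _, d := range o.deps {
		if d == other {
			return true
		}
	}
	return false
}

// resolve mirrors changeset.ResolveDependencies: DFS with three node states,
// prepending each finished node so it lands before everything that depends on it.
func resolve(ops []*op) ([]*op, error) {
	const (
		unvisited = iota
		current
		visited
	)
	status := make(map[*op]int, len(ops))
	var resolved []*op
	var visit func(*op) error
	visit = func(o *op) error {
		switch status[o] {
		case visited:
			return nil
		case current:
			return errors.New("detected circular dependency")
		}
		status[o] = current
		for _, another := range ops {
			if another != o && another.dependsOn(o) {
				if err := visit(another); err != nil {
					return err
				}
			}
		}
		status[o] = visited
		resolved = append([]*op{o}, resolved...)
		return nil
	}
	for _, o := range ops {
		if err := visit(o); err != nil {
			return nil, err
		}
	}
	return resolved, nil
}

func main() {
	dropFK := &op{name: "DROP FOREIGN KEY"}
	dropTable := &op{name: "DROP TABLE", deps: []*op{dropFK}}
	order, _ := resolve([]*op{dropTable, dropFK})
	for _, o := range order {
		fmt.Println(o.name) // DROP FOREIGN KEY, then DROP TABLE
	}
}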

vendor/github.com/uptrace/bun/migrate/diff.go (new file, generated, vendored, 411 lines)
@@ -0,0 +1,411 @@
package migrate

import (
	"github.com/uptrace/bun/migrate/sqlschema"
)

// changeset is a set of changes to the database schema definition.
type changeset struct {
	operations []Operation
}

// Add new operations to the changeset.
func (c *changeset) Add(op ...Operation) {
	c.operations = append(c.operations, op...)
}

// diff calculates the diff between the current database schema and the target state.
// The changeset is not sorted -- the caller should resolve dependencies before applying the changes.
func diff(got, want sqlschema.Database, opts ...diffOption) *changeset {
	d := newDetector(got, want, opts...)
	return d.detectChanges()
}

func (d *detector) detectChanges() *changeset {
	currentTables := d.current.GetTables()
	targetTables := d.target.GetTables()

RenameCreate:
	for wantName, wantTable := range targetTables.FromOldest() {

		// A table with this name exists in the database. We assume that schema objects won't
		// be renamed to an already existing name, nor do we support such cases.
		// Simply check if the table definition has changed.
		if haveTable, ok := currentTables.Get(wantName); ok {
			d.detectColumnChanges(haveTable, wantTable, true)
			d.detectConstraintChanges(haveTable, wantTable)
			continue
		}

		// Find all renamed tables. We assume that renamed tables have the same signature.
		for haveName, haveTable := range currentTables.FromOldest() {
			if _, exists := targetTables.Get(haveName); !exists && d.canRename(haveTable, wantTable) {
				d.changes.Add(&RenameTableOp{
					TableName: haveTable.GetName(),
					NewName:   wantName,
				})
				d.refMap.RenameTable(haveTable.GetName(), wantName)

				// Find renamed columns, if any, and check if constraints (PK, UNIQUE) have been updated.
				// We need not check wantTable any further.
				d.detectColumnChanges(haveTable, wantTable, false)
				d.detectConstraintChanges(haveTable, wantTable)
				currentTables.Delete(haveName)
				continue RenameCreate
			}
		}

		// If wantTable does not exist in the database and was not renamed
		// then we need to create this table in the database.
		additional := wantTable.(*sqlschema.BunTable)
		d.changes.Add(&CreateTableOp{
			TableName: wantTable.GetName(),
			Model:     additional.Model,
		})
	}

	// Drop any remaining "current" tables which do not have a model.
	for name, table := range currentTables.FromOldest() {
		if _, keep := targetTables.Get(name); !keep {
			d.changes.Add(&DropTableOp{
				TableName: table.GetName(),
			})
		}
	}

	targetFKs := d.target.GetForeignKeys()
	currentFKs := d.refMap.Deref()

	for fk := range targetFKs {
		if _, ok := currentFKs[fk]; !ok {
			d.changes.Add(&AddForeignKeyOp{
				ForeignKey:     fk,
				ConstraintName: "", // leave empty to let each dialect apply their convention
			})
		}
	}

	for fk, name := range currentFKs {
		if _, ok := targetFKs[fk]; !ok {
			d.changes.Add(&DropForeignKeyOp{
				ConstraintName: name,
				ForeignKey:     fk,
			})
		}
	}

	return &d.changes
}

// detectColumnChanges finds renamed columns and, if checkType == true, columns with a changed type.
func (d *detector) detectColumnChanges(current, target sqlschema.Table, checkType bool) {
	currentColumns := current.GetColumns()
	targetColumns := target.GetColumns()

ChangeRename:
	for tName, tCol := range targetColumns.FromOldest() {

		// This column exists in the database, so it hasn't been renamed, dropped, or added.
		// Still, we should not delete(columns, thisColumn), because later we will need to
		// check that we do not try to rename a column to a name that already exists.
		if cCol, ok := currentColumns.Get(tName); ok {
			if checkType && !d.equalColumns(cCol, tCol) {
				d.changes.Add(&ChangeColumnTypeOp{
					TableName: target.GetName(),
					Column:    tName,
					From:      cCol,
					To:        d.makeTargetColDef(cCol, tCol),
				})
			}
			continue
		}

		// Column tName does not exist in the database -- it's been either renamed or added.
		// Find renamed columns first.
		for cName, cCol := range currentColumns.FromOldest() {
			// Cannot rename if a column with this name already exists or the types differ.
			if _, exists := targetColumns.Get(cName); exists || !d.equalColumns(tCol, cCol) {
				continue
			}
			d.changes.Add(&RenameColumnOp{
				TableName: target.GetName(),
				OldName:   cName,
				NewName:   tName,
			})
			d.refMap.RenameColumn(target.GetName(), cName, tName)
			currentColumns.Delete(cName) // no need to check this column again

			// Update primary key definition to avoid superficially recreating the constraint.
			current.GetPrimaryKey().Columns.Replace(cName, tName)

			continue ChangeRename
		}

		d.changes.Add(&AddColumnOp{
			TableName:  target.GetName(),
			ColumnName: tName,
			Column:     tCol,
		})
	}

	// Drop columns which do not exist in the target schema and were not renamed.
	for cName, cCol := range currentColumns.FromOldest() {
		if _, keep := targetColumns.Get(cName); !keep {
			d.changes.Add(&DropColumnOp{
				TableName:  target.GetName(),
				ColumnName: cName,
				Column:     cCol,
			})
		}
	}
}

func (d *detector) detectConstraintChanges(current, target sqlschema.Table) {
Add:
	for _, want := range target.GetUniqueConstraints() {
		for _, got := range current.GetUniqueConstraints() {
			if got.Equals(want) {
				continue Add
			}
		}
		d.changes.Add(&AddUniqueConstraintOp{
			TableName: target.GetName(),
			Unique:    want,
		})
	}

Drop:
	for _, got := range current.GetUniqueConstraints() {
		for _, want := range target.GetUniqueConstraints() {
			if got.Equals(want) {
				continue Drop
			}
		}

		d.changes.Add(&DropUniqueConstraintOp{
			TableName: target.GetName(),
			Unique:    got,
		})
	}

	targetPK := target.GetPrimaryKey()
	currentPK := current.GetPrimaryKey()

	// Detect primary key changes
	if targetPK == nil && currentPK == nil {
		return
	}
	switch {
	case targetPK == nil && currentPK != nil:
		d.changes.Add(&DropPrimaryKeyOp{
			TableName:  target.GetName(),
			PrimaryKey: *currentPK,
		})
	case currentPK == nil && targetPK != nil:
		d.changes.Add(&AddPrimaryKeyOp{
			TableName:  target.GetName(),
			PrimaryKey: *targetPK,
		})
	case targetPK.Columns != currentPK.Columns:
		d.changes.Add(&ChangePrimaryKeyOp{
			TableName: target.GetName(),
			Old:       *currentPK,
			New:       *targetPK,
		})
	}
}

func newDetector(got, want sqlschema.Database, opts ...diffOption) *detector {
	cfg := &detectorConfig{
		cmpType: func(c1, c2 sqlschema.Column) bool {
			return c1.GetSQLType() == c2.GetSQLType() && c1.GetVarcharLen() == c2.GetVarcharLen()
		},
	}
	for _, opt := range opts {
		opt(cfg)
	}

	return &detector{
		current: got,
		target:  want,
		refMap:  newRefMap(got.GetForeignKeys()),
		cmpType: cfg.cmpType,
	}
}

type diffOption func(*detectorConfig)

func withCompareTypeFunc(f CompareTypeFunc) diffOption {
	return func(cfg *detectorConfig) {
		cfg.cmpType = f
	}
}

// detectorConfig controls how differences in the model states are resolved.
type detectorConfig struct {
	cmpType CompareTypeFunc
}

// detector may modify the passed database schemas, so it isn't safe to re-use them.
type detector struct {
	// current state represents the existing database schema.
	current sqlschema.Database

	// target state represents the database schema defined in bun models.
	target sqlschema.Database

	changes changeset
	refMap  refMap

	// cmpType determines column type equivalence.
	// Default is direct comparison with the '==' operator, which is inaccurate
	// due to the existence of dialect-specific type aliases. The caller
	// should pass a concrete InspectorDialect.CompareType for robust comparison.
	cmpType CompareTypeFunc
}

// canRename checks if t1 can be renamed to t2.
func (d detector) canRename(t1, t2 sqlschema.Table) bool {
	return t1.GetSchema() == t2.GetSchema() && equalSignatures(t1, t2, d.equalColumns)
}

func (d detector) equalColumns(col1, col2 sqlschema.Column) bool {
	return d.cmpType(col1, col2) &&
		col1.GetDefaultValue() == col2.GetDefaultValue() &&
		col1.GetIsNullable() == col2.GetIsNullable() &&
		col1.GetIsAutoIncrement() == col2.GetIsAutoIncrement() &&
		col1.GetIsIdentity() == col2.GetIsIdentity()
}

func (d detector) makeTargetColDef(current, target sqlschema.Column) sqlschema.Column {
	// Avoid unnecessary type-change migrations if the types are equivalent.
	if d.cmpType(current, target) {
		target = &sqlschema.BaseColumn{
			Name:            target.GetName(),
			DefaultValue:    target.GetDefaultValue(),
			IsNullable:      target.GetIsNullable(),
			IsAutoIncrement: target.GetIsAutoIncrement(),
			IsIdentity:      target.GetIsIdentity(),

			SQLType:    current.GetSQLType(),
			VarcharLen: current.GetVarcharLen(),
		}
	}
	return target
}

type CompareTypeFunc func(sqlschema.Column, sqlschema.Column) bool

// equalSignatures determines if two tables have the same "signature".
func equalSignatures(t1, t2 sqlschema.Table, eq CompareTypeFunc) bool {
	sig1 := newSignature(t1, eq)
	sig2 := newSignature(t2, eq)
	return sig1.Equals(sig2)
}

// signature is a set of column definitions, which allows "relation/name-agnostic" comparison between them;
// meaning that two columns are considered equal if their types are the same.
type signature struct {

	// underlying stores the number of occurrences for each unique column type.
	// It helps to account for the fact that a table might have multiple columns that have the same type.
	underlying map[sqlschema.BaseColumn]int

	eq CompareTypeFunc
}

func newSignature(t sqlschema.Table, eq CompareTypeFunc) signature {
	s := signature{
		underlying: make(map[sqlschema.BaseColumn]int),
		eq:         eq,
	}
	s.scan(t)
	return s
}

// scan iterates over the table's fields and counts occurrences of each unique column definition.
func (s *signature) scan(t sqlschema.Table) {
	for _, icol := range t.GetColumns().FromOldest() {
		scanCol := icol.(*sqlschema.BaseColumn)
		// This is slightly more expensive than if the columns could be compared directly
		// and we always did s.underlying[col]++, but we get type-equivalence in return.
		col, count := s.getCount(*scanCol)
		if count == 0 {
			s.underlying[*scanCol] = 1
		} else {
			s.underlying[col]++
		}
	}
}

// getCount uses CompareTypeFunc to find a column with the same (equivalent) SQL type
// and returns its count. Count 0 means there are no columns of this type.
func (s *signature) getCount(keyCol sqlschema.BaseColumn) (key sqlschema.BaseColumn, count int) {
	for col, cnt := range s.underlying {
		if s.eq(&col, &keyCol) {
			return col, cnt
		}
	}
	return keyCol, 0
}

// Equals returns true if 2 signatures share an identical set of columns.
func (s *signature) Equals(other signature) bool {
	if len(s.underlying) != len(other.underlying) {
		return false
	}
	for col, count := range s.underlying {
		if _, countOther := other.getCount(col); countOther != count {
			return false
		}
	}
	return true
}
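
In other words, a signature is a multiset of column types, so tables match even when columns were renamed or reordered. A minimal self-contained sketch of the idea, using plain strings instead of sqlschema columns:

package main

import "fmt"

// signatureOf counts occurrences of each column type, so two tables match
// when they have the same multiset of types, regardless of column names.
func signatureOf(types []string) map[string]int {
	sig := make(map[string]int, len(types))
	for _, t := range types {
		sig[t]++
	}
	return sig
}

func equalSignatures(a, b map[string]int) bool {
	if len(a) != len(b) {
		return false
	}
	for t, n := range a {
		if b[t] != n {
			return false
		}
	}
	return true
}

func main() {
	// {id BIGINT, name VARCHAR, alias VARCHAR} vs a renamed-columns variant.
	t1 := signatureOf([]string{"BIGINT", "VARCHAR", "VARCHAR"})
	t2 := signatureOf([]string{"VARCHAR", "BIGINT", "VARCHAR"})
	fmt.Println(equalSignatures(t1, t2)) // true: candidates for a table rename
}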

// refMap is a utility for tracking superficial changes in foreign keys,
// which do not require any modification in the database.
// Modern SQL dialects automatically update foreign key constraints whenever
// a column or a table is renamed. Detector can use refMap to ignore any
// differences in foreign keys which were caused by a renamed column/table.
type refMap map[*sqlschema.ForeignKey]string

func newRefMap(fks map[sqlschema.ForeignKey]string) refMap {
	rm := make(map[*sqlschema.ForeignKey]string)
	for fk, name := range fks {
		rm[&fk] = name
	}
	return rm
}

// RenameTable updates the table name in all foreign key definitions which depend on it.
func (rm refMap) RenameTable(tableName string, newName string) {
	for fk := range rm {
		switch tableName {
		case fk.From.TableName:
			fk.From.TableName = newName
		case fk.To.TableName:
			fk.To.TableName = newName
		}
	}
}

// RenameColumn updates the column name in all foreign key definitions which depend on it.
func (rm refMap) RenameColumn(tableName string, column, newName string) {
	for fk := range rm {
		if tableName == fk.From.TableName {
			fk.From.Column.Replace(column, newName)
		}
		if tableName == fk.To.TableName {
			fk.To.Column.Replace(column, newName)
		}
	}
}

// Deref returns copies of the ForeignKey values in a map.
func (rm refMap) Deref() map[sqlschema.ForeignKey]string {
	out := make(map[sqlschema.ForeignKey]string)
	for fk, name := range rm {
		out[*fk] = name
	}
	return out
}
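
The pointer-keyed map is the core trick: renames mutate the pointed-to ForeignKey in place, and Deref re-keys by (updated) value for comparison against the target set. A self-contained sketch of the pattern with toy types:

package main

import "fmt"

// Toy version of refMap: track constraints by pointer so renames mutate
// the keys in place, then "deref" back to values for comparison.
type fk struct{ table, column string }

func main() {
	orig := []fk{{"books", "author_id"}, {"reviews", "book_id"}}

	byPtr := make(map[*fk]string)
	for i := range orig {
		byPtr[&orig[i]] = fmt.Sprintf("fk_%s_%s", orig[i].table, orig[i].column)
	}

	// A table rename only touches the pointed-to values; names stay attached.
	for p := range byPtr {
		if p.table == "books" {
			p.table = "publications"
		}
	}

	// Deref: re-key by (updated) value.
	byVal := make(map[fk]string)
	for p, name := range byPtr {
		byVal[*p] = name
	}
	fmt.Println(byVal[fk{"publications", "author_id"}]) // fk_books_author_id
}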

vendor/github.com/uptrace/bun/migrate/migrator.go (generated, vendored, 23 changes)
@@ -12,14 +12,21 @@
 	"github.com/uptrace/bun"
 )

+const (
+	defaultTable      = "bun_migrations"
+	defaultLocksTable = "bun_migration_locks"
+)
+
 type MigratorOption func(m *Migrator)

+// WithTableName overrides default migrations table name.
 func WithTableName(table string) MigratorOption {
 	return func(m *Migrator) {
 		m.table = table
 	}
 }

+// WithLocksTableName overrides default migration locks table name.
 func WithLocksTableName(table string) MigratorOption {
 	return func(m *Migrator) {
 		m.locksTable = table
@@ -27,7 +34,7 @@ func WithLocksTableName(table string) MigratorOption {
 }

 // WithMarkAppliedOnSuccess sets the migrator to only mark migrations as applied/unapplied
-// when their up/down is successful
+// when their up/down is successful.
 func WithMarkAppliedOnSuccess(enabled bool) MigratorOption {
 	return func(m *Migrator) {
 		m.markAppliedOnSuccess = enabled
@@ -52,8 +59,8 @@ func NewMigrator(db *bun.DB, migrations *Migrations, opts ...MigratorOption) *Migrator {

 		ms: migrations.ms,

-		table:      "bun_migrations",
-		locksTable: "bun_migration_locks",
+		table:      defaultTable,
+		locksTable: defaultLocksTable,
 	}
 	for _, opt := range opts {
 		opt(m)
@@ -246,7 +253,7 @@ func (m *Migrator) CreateGoMigration(
 		opt(cfg)
 	}

-	name, err := m.genMigrationName(name)
+	name, err := genMigrationName(name)
 	if err != nil {
 		return nil, err
 	}
@@ -269,7 +276,7 @@ func (m *Migrator) CreateGoMigration(

 // CreateTxSQLMigrations creates transactional up and down SQL migration files.
 func (m *Migrator) CreateTxSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
-	name, err := m.genMigrationName(name)
+	name, err := genMigrationName(name)
 	if err != nil {
 		return nil, err
 	}
@@ -289,7 +296,7 @@ func (m *Migrator) CreateTxSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {

 // CreateSQLMigrations creates up and down SQL migration files.
 func (m *Migrator) CreateSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
-	name, err := m.genMigrationName(name)
+	name, err := genMigrationName(name)
 	if err != nil {
 		return nil, err
 	}
@@ -307,7 +314,7 @@ func (m *Migrator) CreateSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
 	return []*MigrationFile{up, down}, nil
 }

-func (m *Migrator) createSQL(ctx context.Context, fname string, transactional bool) (*MigrationFile, error) {
+func (m *Migrator) createSQL(_ context.Context, fname string, transactional bool) (*MigrationFile, error) {
 	fpath := filepath.Join(m.migrations.getDirectory(), fname)

 	template := sqlTemplate
@@ -329,7 +336,7 @@ func (m *Migrator) createSQL(ctx context.Context, fname string, transactional bool) (*MigrationFile, error) {

 var nameRE = regexp.MustCompile(`^[0-9a-z_\-]+$`)

-func (m *Migrator) genMigrationName(name string) (string, error) {
+func genMigrationName(name string) (string, error) {
 	const timeFormat = "20060102150405"

 	if name == "" {

vendor/github.com/uptrace/bun/migrate/operations.go (new file, generated, vendored, 340 lines)
@@ -0,0 +1,340 @@
package migrate

import (
	"fmt"

	"github.com/uptrace/bun/migrate/sqlschema"
)

// Operation encapsulates the request to change a database definition
// and knows which operation can revert it.
//
// It is useful to define "monolith" Operations whenever possible,
// even though a dialect may require several distinct steps to apply them.
// For example, changing a primary key involves first dropping the old constraint
// before generating the new one. Yet, this is only an implementation detail and
// passing a higher-level ChangePrimaryKeyOp will give the dialect more information
// about the applied change.
//
// Some operations might be irreversible due to technical limitations. Returning
// a *comment from GetReverse() will add an explanatory note to the generated migration file.
//
// To declare a dependency on another Operation, operations should implement
// the { DependsOn(Operation) bool } interface, which the changeset will use to resolve dependencies.
type Operation interface {
	GetReverse() Operation
}
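
To illustrate the contract, here is a hedged sketch of what a user-defined operation could look like inside this package; CreateIndexOp and DropIndexOp are hypothetical and not part of bun:

// Hypothetical example, not part of bun: an index creation operation
// that reverses into a drop, and that must run after the column it
// indexes has been added.
type CreateIndexOp struct {
	TableName  string
	IndexName  string
	ColumnName string
}

func (op *CreateIndexOp) GetReverse() Operation {
	return &DropIndexOp{TableName: op.TableName, IndexName: op.IndexName, ColumnName: op.ColumnName}
}

// DependsOn makes the resolver schedule AddColumnOp for the indexed column first.
func (op *CreateIndexOp) DependsOn(another Operation) bool {
	add, ok := another.(*AddColumnOp)
	return ok && add.TableName == op.TableName && add.ColumnName == op.ColumnName
}

type DropIndexOp struct {
	TableName  string
	IndexName  string
	ColumnName string
}

func (op *DropIndexOp) GetReverse() Operation {
	return &CreateIndexOp{TableName: op.TableName, IndexName: op.IndexName, ColumnName: op.ColumnName}
}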

// CreateTableOp creates a new table in the schema.
//
// It does not report dependency on any other migration and may be executed first.
// Make sure the dialect does not include FOREIGN KEY constraints in the CREATE TABLE
// statement, as those may potentially reference not-yet-existing columns/tables.
type CreateTableOp struct {
	TableName string
	Model     interface{}
}

var _ Operation = (*CreateTableOp)(nil)

func (op *CreateTableOp) GetReverse() Operation {
	return &DropTableOp{TableName: op.TableName}
}

// DropTableOp drops a database table. This operation is not reversible.
type DropTableOp struct {
	TableName string
}

var _ Operation = (*DropTableOp)(nil)

func (op *DropTableOp) DependsOn(another Operation) bool {
	drop, ok := another.(*DropForeignKeyOp)
	return ok && drop.ForeignKey.DependsOnTable(op.TableName)
}

// GetReverse for a DropTable returns a no-op migration. Logically, CreateTable is the reverse,
// but DropTable does not have the table's definition to create one.
func (op *DropTableOp) GetReverse() Operation {
	c := comment(fmt.Sprintf("WARNING: \"DROP TABLE %s\" cannot be reversed automatically because table definition is not available", op.TableName))
	return &c
}

// RenameTableOp renames the table. Changing the "schema" part of the table's FQN (moving tables between schemas) is not allowed.
type RenameTableOp struct {
	TableName string
	NewName   string
}

var _ Operation = (*RenameTableOp)(nil)

func (op *RenameTableOp) GetReverse() Operation {
	return &RenameTableOp{
		TableName: op.NewName,
		NewName:   op.TableName,
	}
}

// RenameColumnOp renames a column in the table. If the changeset includes a rename operation
// for the column's table, it should be executed first.
type RenameColumnOp struct {
	TableName string
	OldName   string
	NewName   string
}

var _ Operation = (*RenameColumnOp)(nil)

func (op *RenameColumnOp) GetReverse() Operation {
	return &RenameColumnOp{
		TableName: op.TableName,
		OldName:   op.NewName,
		NewName:   op.OldName,
	}
}

func (op *RenameColumnOp) DependsOn(another Operation) bool {
	rename, ok := another.(*RenameTableOp)
	return ok && op.TableName == rename.NewName
}

// AddColumnOp adds a new column to the table.
type AddColumnOp struct {
	TableName  string
	ColumnName string
	Column     sqlschema.Column
}

var _ Operation = (*AddColumnOp)(nil)

func (op *AddColumnOp) GetReverse() Operation {
	return &DropColumnOp{
		TableName:  op.TableName,
		ColumnName: op.ColumnName,
		Column:     op.Column,
	}
}

// DropColumnOp drops a column from the table.
//
// While some dialects allow DROP CASCADE to drop dependent constraints,
// explicit handling on constraints is preferred for transparency and debugging.
// DropColumnOp depends on DropForeignKeyOp, DropPrimaryKeyOp, and ChangePrimaryKeyOp
// if any of the constraints is defined on this table.
type DropColumnOp struct {
	TableName  string
	ColumnName string
	Column     sqlschema.Column
}

var _ Operation = (*DropColumnOp)(nil)

func (op *DropColumnOp) GetReverse() Operation {
	return &AddColumnOp{
		TableName:  op.TableName,
		ColumnName: op.ColumnName,
		Column:     op.Column,
	}
}

func (op *DropColumnOp) DependsOn(another Operation) bool {
	switch drop := another.(type) {
	case *DropForeignKeyOp:
		return drop.ForeignKey.DependsOnColumn(op.TableName, op.ColumnName)
	case *DropPrimaryKeyOp:
		return op.TableName == drop.TableName && drop.PrimaryKey.Columns.Contains(op.ColumnName)
	case *ChangePrimaryKeyOp:
		return op.TableName == drop.TableName && drop.Old.Columns.Contains(op.ColumnName)
	}
	return false
}

// AddForeignKeyOp adds a new FOREIGN KEY constraint.
type AddForeignKeyOp struct {
	ForeignKey     sqlschema.ForeignKey
	ConstraintName string
}

var _ Operation = (*AddForeignKeyOp)(nil)

func (op *AddForeignKeyOp) TableName() string {
	return op.ForeignKey.From.TableName
}

func (op *AddForeignKeyOp) DependsOn(another Operation) bool {
	switch another := another.(type) {
	case *RenameTableOp:
		return op.ForeignKey.DependsOnTable(another.TableName) || op.ForeignKey.DependsOnTable(another.NewName)
	case *CreateTableOp:
		return op.ForeignKey.DependsOnTable(another.TableName)
	}
	return false
}

func (op *AddForeignKeyOp) GetReverse() Operation {
	return &DropForeignKeyOp{
		ForeignKey:     op.ForeignKey,
		ConstraintName: op.ConstraintName,
	}
}

// DropForeignKeyOp drops a FOREIGN KEY constraint.
type DropForeignKeyOp struct {
	ForeignKey     sqlschema.ForeignKey
	ConstraintName string
}

var _ Operation = (*DropForeignKeyOp)(nil)

func (op *DropForeignKeyOp) TableName() string {
	return op.ForeignKey.From.TableName
}

func (op *DropForeignKeyOp) GetReverse() Operation {
	return &AddForeignKeyOp{
		ForeignKey:     op.ForeignKey,
		ConstraintName: op.ConstraintName,
	}
}

// AddUniqueConstraintOp adds a new UNIQUE constraint to the table.
type AddUniqueConstraintOp struct {
	TableName string
	Unique    sqlschema.Unique
}

var _ Operation = (*AddUniqueConstraintOp)(nil)

func (op *AddUniqueConstraintOp) GetReverse() Operation {
	return &DropUniqueConstraintOp{
		TableName: op.TableName,
		Unique:    op.Unique,
	}
}

func (op *AddUniqueConstraintOp) DependsOn(another Operation) bool {
	switch another := another.(type) {
	case *AddColumnOp:
		return op.TableName == another.TableName && op.Unique.Columns.Contains(another.ColumnName)
	case *RenameTableOp:
		return op.TableName == another.NewName
	case *DropUniqueConstraintOp:
		// We want to drop the constraint with the same name before adding this one.
		return op.TableName == another.TableName && op.Unique.Name == another.Unique.Name
	default:
		return false
	}
}

// DropUniqueConstraintOp drops a UNIQUE constraint.
type DropUniqueConstraintOp struct {
	TableName string
	Unique    sqlschema.Unique
}

var _ Operation = (*DropUniqueConstraintOp)(nil)

func (op *DropUniqueConstraintOp) DependsOn(another Operation) bool {
	if rename, ok := another.(*RenameTableOp); ok {
		return op.TableName == rename.NewName
	}
	return false
}

func (op *DropUniqueConstraintOp) GetReverse() Operation {
	return &AddUniqueConstraintOp{
		TableName: op.TableName,
		Unique:    op.Unique,
	}
}

// ChangeColumnTypeOp sets a new data type for the column.
// The two types should be such that the data can be auto-cast from one to another.
// E.g. reducing VARCHAR length is not possible in most dialects.
// AutoMigrator does not enforce or validate these rules.
type ChangeColumnTypeOp struct {
	TableName string
	Column    string
	From      sqlschema.Column
	To        sqlschema.Column
}

var _ Operation = (*ChangeColumnTypeOp)(nil)

func (op *ChangeColumnTypeOp) GetReverse() Operation {
	return &ChangeColumnTypeOp{
		TableName: op.TableName,
		Column:    op.Column,
		From:      op.To,
		To:        op.From,
	}
}

// DropPrimaryKeyOp drops the table's PRIMARY KEY.
type DropPrimaryKeyOp struct {
	TableName  string
	PrimaryKey sqlschema.PrimaryKey
}

var _ Operation = (*DropPrimaryKeyOp)(nil)

func (op *DropPrimaryKeyOp) GetReverse() Operation {
	return &AddPrimaryKeyOp{
		TableName:  op.TableName,
		PrimaryKey: op.PrimaryKey,
	}
}

// AddPrimaryKeyOp adds a new PRIMARY KEY to the table.
type AddPrimaryKeyOp struct {
	TableName  string
	PrimaryKey sqlschema.PrimaryKey
}

var _ Operation = (*AddPrimaryKeyOp)(nil)

func (op *AddPrimaryKeyOp) GetReverse() Operation {
	return &DropPrimaryKeyOp{
		TableName:  op.TableName,
		PrimaryKey: op.PrimaryKey,
	}
}

func (op *AddPrimaryKeyOp) DependsOn(another Operation) bool {
	switch another := another.(type) {
	case *AddColumnOp:
		return op.TableName == another.TableName && op.PrimaryKey.Columns.Contains(another.ColumnName)
	}
	return false
}

// ChangePrimaryKeyOp changes the PRIMARY KEY of the table.
type ChangePrimaryKeyOp struct {
	TableName string
	Old       sqlschema.PrimaryKey
	New       sqlschema.PrimaryKey
}

var _ Operation = (*ChangePrimaryKeyOp)(nil)

func (op *ChangePrimaryKeyOp) GetReverse() Operation {
	return &ChangePrimaryKeyOp{
		TableName: op.TableName,
		Old:       op.New,
		New:       op.Old,
	}
}

// comment denotes an Operation that cannot be executed.
//
// Operations, which cannot be reversed due to current technical limitations,
// may return &comment with a helpful message from their GetReverse() method.
//
// The changeset should skip it when applying operations or output it as a log message,
// and write it as an SQL comment when creating migration files.
type comment string

var _ Operation = (*comment)(nil)

func (c *comment) GetReverse() Operation { return c }

vendor/github.com/uptrace/bun/migrate/sqlschema/column.go (new file, generated, vendored, 75 lines)
@@ -0,0 +1,75 @@
package sqlschema

import (
	"fmt"

	"github.com/uptrace/bun/schema"
)

type Column interface {
	GetName() string
	GetSQLType() string
	GetVarcharLen() int
	GetDefaultValue() string
	GetIsNullable() bool
	GetIsAutoIncrement() bool
	GetIsIdentity() bool
	AppendQuery(schema.Formatter, []byte) ([]byte, error)
}

var _ Column = (*BaseColumn)(nil)

// BaseColumn is a base column definition that stores various attributes of a column.
//
// Dialects and only dialects can use it to implement the Column interface.
// Other packages must use the Column interface.
type BaseColumn struct {
	Name            string
	SQLType         string
	VarcharLen      int
	DefaultValue    string
	IsNullable      bool
	IsAutoIncrement bool
	IsIdentity      bool
	// TODO: add Precision and Cardinality for timestamps/bit-strings/floats and arrays respectively.
}

func (cd BaseColumn) GetName() string {
	return cd.Name
}

func (cd BaseColumn) GetSQLType() string {
	return cd.SQLType
}

func (cd BaseColumn) GetVarcharLen() int {
	return cd.VarcharLen
}

func (cd BaseColumn) GetDefaultValue() string {
	return cd.DefaultValue
}

func (cd BaseColumn) GetIsNullable() bool {
	return cd.IsNullable
}

func (cd BaseColumn) GetIsAutoIncrement() bool {
	return cd.IsAutoIncrement
}

func (cd BaseColumn) GetIsIdentity() bool {
	return cd.IsIdentity
}

// AppendQuery appends full SQL data type.
func (c *BaseColumn) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, err error) {
	b = append(b, c.SQLType...)
	if c.VarcharLen == 0 {
		return b, nil
	}
	b = append(b, "("...)
	b = append(b, fmt.Sprint(c.VarcharLen)...)
	b = append(b, ")"...)
	return b, nil
}

vendor/github.com/uptrace/bun/migrate/sqlschema/database.go (new file, generated, vendored, 127 lines)
@@ -0,0 +1,127 @@
package sqlschema

import (
	"slices"
	"strings"

	"github.com/uptrace/bun/schema"
	orderedmap "github.com/wk8/go-ordered-map/v2"
)

type Database interface {
	GetTables() *orderedmap.OrderedMap[string, Table]
	GetForeignKeys() map[ForeignKey]string
}

var _ Database = (*BaseDatabase)(nil)

// BaseDatabase is a base database definition.
//
// Dialects and only dialects can use it to implement the Database interface.
// Other packages must use the Database interface.
type BaseDatabase struct {
	Tables      *orderedmap.OrderedMap[string, Table]
	ForeignKeys map[ForeignKey]string
}

func (ds BaseDatabase) GetTables() *orderedmap.OrderedMap[string, Table] {
	return ds.Tables
}

func (ds BaseDatabase) GetForeignKeys() map[ForeignKey]string {
	return ds.ForeignKeys
}

type ForeignKey struct {
	From ColumnReference
	To   ColumnReference
}

func NewColumnReference(tableName string, columns ...string) ColumnReference {
	return ColumnReference{
		TableName: tableName,
		Column:    NewColumns(columns...),
	}
}

func (fk ForeignKey) DependsOnTable(tableName string) bool {
	return fk.From.TableName == tableName || fk.To.TableName == tableName
}

func (fk ForeignKey) DependsOnColumn(tableName string, column string) bool {
	return fk.DependsOnTable(tableName) &&
		(fk.From.Column.Contains(column) || fk.To.Column.Contains(column))
}

// Columns is a hashable representation of []string used to define schema constraints that depend on multiple columns.
// Although having duplicated column references in these constraints is illegal, Columns neither validates nor enforces this constraint on the caller.
type Columns string

// NewColumns creates a composite column from a slice of column names.
func NewColumns(columns ...string) Columns {
	slices.Sort(columns)
	return Columns(strings.Join(columns, ","))
}

func (c *Columns) String() string {
	return string(*c)
}

func (c *Columns) AppendQuery(fmter schema.Formatter, b []byte) ([]byte, error) {
	return schema.Safe(*c).AppendQuery(fmter, b)
}

// Split returns a slice of column names that make up the composite.
func (c *Columns) Split() []string {
	return strings.Split(c.String(), ",")
}

// ContainsColumns checks that columns in "other" are a subset of the current columns.
func (c *Columns) ContainsColumns(other Columns) bool {
	columns := c.Split()
Outer:
	for _, check := range other.Split() {
		for _, column := range columns {
			if check == column {
				continue Outer
			}
		}
		return false
	}
	return true
}

// Contains checks that a composite column contains the current column.
func (c *Columns) Contains(other string) bool {
	return c.ContainsColumns(Columns(other))
}

// Replace renames a column if it is part of the composite.
// If a composite consists of multiple columns, only one column will be renamed.
func (c *Columns) Replace(oldColumn, newColumn string) bool {
	columns := c.Split()
	for i, column := range columns {
		if column == oldColumn {
			columns[i] = newColumn
			*c = NewColumns(columns...)
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// Unique represents a unique constraint defined on 1 or more columns.
|
||||||
|
type Unique struct {
|
||||||
|
Name string
|
||||||
|
Columns Columns
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equals checks that two unique constraint are the same, assuming both are defined for the same table.
|
||||||
|
func (u Unique) Equals(other Unique) bool {
|
||||||
|
return u.Columns == other.Columns
|
||||||
|
}
|
||||||
|
|
||||||
|
type ColumnReference struct {
|
||||||
|
TableName string
|
||||||
|
Column Columns
|
||||||
|
}
|
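The Columns type above keeps multi-column constraints comparable by sorting the names and joining them into one string. A minimal sketch of how it composes with ForeignKey (the main wrapper and printed values are illustrative; all identifiers come from the sqlschema package in this diff):

package main

import (
	"fmt"

	"github.com/uptrace/bun/migrate/sqlschema"
)

func main() {
	// NewColumns sorts its input, so ("book_id", "author_id") and
	// ("author_id", "book_id") produce the same hashable value.
	cols := sqlschema.NewColumns("book_id", "author_id")
	fmt.Println(cols.String())            // author_id,book_id
	fmt.Println(cols.Contains("book_id")) // true

	// ForeignKey is comparable and usable as a map key, which is exactly
	// how BaseDatabase.ForeignKeys stores it.
	fk := sqlschema.ForeignKey{
		From: sqlschema.NewColumnReference("books", "author_id"),
		To:   sqlschema.NewColumnReference("authors", "id"),
	}
	fmt.Println(fk.DependsOnColumn("authors", "id")) // true
}
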
vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go (generated, vendored, new file, 241 lines)

@@ -0,0 +1,241 @@
package sqlschema

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/schema"
	orderedmap "github.com/wk8/go-ordered-map/v2"
)

type InspectorDialect interface {
	schema.Dialect

	// NewInspector returns a new instance of Inspector for the dialect.
	// Dialects MAY set their default InspectorConfig values in the constructor
	// but MUST apply InspectorOptions to ensure they can be overridden.
	//
	// Use ApplyInspectorOptions to reduce boilerplate.
	NewInspector(db *bun.DB, options ...InspectorOption) Inspector

	// CompareType returns true if col1 and col2 SQL types are equivalent,
	// i.e. they might use dialect-specific type aliases (SERIAL ~ SMALLINT)
	// or specify the same VARCHAR length differently (VARCHAR(255) ~ VARCHAR).
	CompareType(Column, Column) bool
}

// InspectorConfig controls the scope of migration by limiting the objects Inspector should return.
// Inspectors SHOULD use the configuration directly instead of copying it, or MAY choose to embed it,
// to make sure options are always applied correctly.
type InspectorConfig struct {
	// SchemaName limits inspection to tables in a particular schema.
	SchemaName string

	// ExcludeTables from inspection.
	ExcludeTables []string
}

// Inspector reads schema state.
type Inspector interface {
	Inspect(ctx context.Context) (Database, error)
}

func WithSchemaName(schemaName string) InspectorOption {
	return func(cfg *InspectorConfig) {
		cfg.SchemaName = schemaName
	}
}

// WithExcludeTables works in append-only mode, i.e. tables cannot be re-included.
func WithExcludeTables(tables ...string) InspectorOption {
	return func(cfg *InspectorConfig) {
		cfg.ExcludeTables = append(cfg.ExcludeTables, tables...)
	}
}

// NewInspector creates a new database inspector, if the dialect supports it.
func NewInspector(db *bun.DB, options ...InspectorOption) (Inspector, error) {
	dialect, ok := (db.Dialect()).(InspectorDialect)
	if !ok {
		return nil, fmt.Errorf("%s does not implement sqlschema.Inspector", db.Dialect().Name())
	}
	return &inspector{
		Inspector: dialect.NewInspector(db, options...),
	}, nil
}

func NewBunModelInspector(tables *schema.Tables, options ...InspectorOption) *BunModelInspector {
	bmi := &BunModelInspector{
		tables: tables,
	}
	ApplyInspectorOptions(&bmi.InspectorConfig, options...)
	return bmi
}

type InspectorOption func(*InspectorConfig)

func ApplyInspectorOptions(cfg *InspectorConfig, options ...InspectorOption) {
	for _, opt := range options {
		opt(cfg)
	}
}

// inspector is an opaque pointer to a database inspector.
type inspector struct {
	Inspector
}

// BunModelInspector creates the current project state from the passed bun.Models.
// Do not recycle BunModelInspector for different sets of models, as older models will not be de-registered before the next run.
type BunModelInspector struct {
	InspectorConfig
	tables *schema.Tables
}

var _ Inspector = (*BunModelInspector)(nil)

func (bmi *BunModelInspector) Inspect(ctx context.Context) (Database, error) {
	state := BunModelSchema{
		BaseDatabase: BaseDatabase{
			ForeignKeys: make(map[ForeignKey]string),
		},
		Tables: orderedmap.New[string, Table](),
	}
	for _, t := range bmi.tables.All() {
		if t.Schema != bmi.SchemaName {
			continue
		}

		columns := orderedmap.New[string, Column]()
		for _, f := range t.Fields {

			sqlType, length, err := parseLen(f.CreateTableSQLType)
			if err != nil {
				return nil, fmt.Errorf("parse length in %q: %w", f.CreateTableSQLType, err)
			}
			columns.Set(f.Name, &BaseColumn{
				Name:            f.Name,
				SQLType:         strings.ToLower(sqlType), // TODO(dyma): maybe this is not necessary after Column.Eq()
				VarcharLen:      length,
				DefaultValue:    exprToLower(f.SQLDefault),
				IsNullable:      !f.NotNull,
				IsAutoIncrement: f.AutoIncrement,
				IsIdentity:      f.Identity,
			})
		}

		var unique []Unique
		for name, group := range t.Unique {
			// Create a separate unique index for single-column unique constraints;
			// let each dialect apply the default naming convention.
			if name == "" {
				for _, f := range group {
					unique = append(unique, Unique{Columns: NewColumns(f.Name)})
				}
				continue
			}

			// Set the name if it is a "unique group", in which case the user has provided the name.
			var columns []string
			for _, f := range group {
				columns = append(columns, f.Name)
			}
			unique = append(unique, Unique{Name: name, Columns: NewColumns(columns...)})
		}

		var pk *PrimaryKey
		if len(t.PKs) > 0 {
			var columns []string
			for _, f := range t.PKs {
				columns = append(columns, f.Name)
			}
			pk = &PrimaryKey{Columns: NewColumns(columns...)}
		}

		// In cases where a table is defined in a non-default schema in the `bun:table` tag,
		// schema.Table only extracts the name of the schema, but passes the entire tag value to t.Name
		// for backwards-compatibility. For example, a bun model like this:
		//	type Model struct { bun.BaseModel `bun:"table:favourite.books"` }
		// produces
		//	schema.Table{ Schema: "favourite", Name: "favourite.books" }
		tableName := strings.TrimPrefix(t.Name, t.Schema+".")
		state.Tables.Set(tableName, &BunTable{
			BaseTable: BaseTable{
				Schema:            t.Schema,
				Name:              tableName,
				Columns:           columns,
				UniqueConstraints: unique,
				PrimaryKey:        pk,
			},
			Model: t.ZeroIface,
		})

		for _, rel := range t.Relations {
			// These relations are nominal and do not need a foreign key to be declared in the current table.
			// They will be either expressed as N:1 relations in an m2m mapping table, or will be referenced by the other table if it's a 1:N.
			if rel.Type == schema.ManyToManyRelation ||
				rel.Type == schema.HasManyRelation {
				continue
			}

			var fromCols, toCols []string
			for _, f := range rel.BasePKs {
				fromCols = append(fromCols, f.Name)
			}
			for _, f := range rel.JoinPKs {
				toCols = append(toCols, f.Name)
			}

			target := rel.JoinTable
			state.ForeignKeys[ForeignKey{
				From: NewColumnReference(t.Name, fromCols...),
				To:   NewColumnReference(target.Name, toCols...),
			}] = ""
		}
	}
	return state, nil
}

func parseLen(typ string) (string, int, error) {
	paren := strings.Index(typ, "(")
	if paren == -1 {
		return typ, 0, nil
	}
	length, err := strconv.Atoi(typ[paren+1 : len(typ)-1])
	if err != nil {
		return typ, 0, err
	}
	return typ[:paren], length, nil
}

// exprToLower converts a string to lowercase, unless it is a quoted string literal like 'lit'.
// Use it to ensure that user-defined default values in the models are always comparable
// to those returned by the database inspector, regardless of the case convention in individual drivers.
func exprToLower(s string) string {
	if strings.HasPrefix(s, "'") && strings.HasSuffix(s, "'") {
		return s
	}
	return strings.ToLower(s)
}

// BunModelSchema is the schema state derived from bun table models.
type BunModelSchema struct {
	BaseDatabase

	Tables *orderedmap.OrderedMap[string, Table]
}

func (ms BunModelSchema) GetTables() *orderedmap.OrderedMap[string, Table] {
	return ms.Tables
}

// BunTable provides additional table metadata that is only accessible from scanning bun models.
type BunTable struct {
	BaseTable

	// Model stores the zero interface to the underlying Go struct.
	Model interface{}
}

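The inspector is configured through functional options. A sketch under the assumption that the connected dialect implements InspectorDialect; db and ctx are prepared elsewhere, and the excluded table names are illustrative:

package example

import (
	"context"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate/sqlschema"
)

// inspectPublic reads the live schema state of the "public" schema,
// skipping two (hypothetical) migration bookkeeping tables.
func inspectPublic(ctx context.Context, db *bun.DB) (sqlschema.Database, error) {
	inspector, err := sqlschema.NewInspector(db,
		sqlschema.WithSchemaName("public"),
		sqlschema.WithExcludeTables("bun_migrations", "bun_migration_locks"),
	)
	if err != nil {
		return nil, err
	}
	return inspector.Inspect(ctx)
}
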
vendor/github.com/uptrace/bun/migrate/sqlschema/migrator.go (generated, vendored, new file, 49 lines)

@@ -0,0 +1,49 @@
package sqlschema

import (
	"fmt"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/schema"
)

type MigratorDialect interface {
	schema.Dialect
	NewMigrator(db *bun.DB, schemaName string) Migrator
}

type Migrator interface {
	AppendSQL(b []byte, operation interface{}) ([]byte, error)
}

// migrator is a dialect-agnostic wrapper for sqlschema.MigratorDialect.
type migrator struct {
	Migrator
}

func NewMigrator(db *bun.DB, schemaName string) (Migrator, error) {
	md, ok := db.Dialect().(MigratorDialect)
	if !ok {
		return nil, fmt.Errorf("%q dialect does not implement sqlschema.Migrator", db.Dialect().Name())
	}
	return &migrator{
		Migrator: md.NewMigrator(db, schemaName),
	}, nil
}

// BaseMigrator can be embedded by dialect's Migrator implementations to re-use some of the existing bun queries.
type BaseMigrator struct {
	db *bun.DB
}

func NewBaseMigrator(db *bun.DB) *BaseMigrator {
	return &BaseMigrator{db: db}
}

func (m *BaseMigrator) AppendCreateTable(b []byte, model interface{}) ([]byte, error) {
	return m.db.NewCreateTable().Model(model).AppendQuery(m.db.Formatter(), b)
}

func (m *BaseMigrator) AppendDropTable(b []byte, schemaName, tableName string) ([]byte, error) {
	return m.db.NewDropTable().TableExpr("?.?", bun.Ident(schemaName), bun.Ident(tableName)).AppendQuery(m.db.Formatter(), b)
}

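NewMigrator mirrors NewInspector: it fails early when the dialect cannot render schema-changing SQL. A sketch; the operation value is whatever the dialect's Migrator understands, so it is left opaque here:

package example

import (
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate/sqlschema"
)

// appendDDL renders one migration operation to SQL using the dialect's
// Migrator, returning an error for dialects without migration support.
func appendDDL(db *bun.DB, operation interface{}) ([]byte, error) {
	m, err := sqlschema.NewMigrator(db, "public")
	if err != nil {
		return nil, err
	}
	return m.AppendSQL(nil, operation)
}
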
vendor/github.com/uptrace/bun/migrate/sqlschema/table.go (generated, vendored, new file, 60 lines)

@@ -0,0 +1,60 @@
package sqlschema

import (
	orderedmap "github.com/wk8/go-ordered-map/v2"
)

type Table interface {
	GetSchema() string
	GetName() string
	GetColumns() *orderedmap.OrderedMap[string, Column]
	GetPrimaryKey() *PrimaryKey
	GetUniqueConstraints() []Unique
}

var _ Table = (*BaseTable)(nil)

// BaseTable is a base table definition.
//
// Dialects and only dialects can use it to implement the Table interface.
// Other packages must use the Table interface.
type BaseTable struct {
	Schema string
	Name   string

	// Columns maps each column name to the column definition.
	Columns *orderedmap.OrderedMap[string, Column]

	// PrimaryKey holds the primary key definition.
	// A nil value means that no primary key is defined for the table.
	PrimaryKey *PrimaryKey

	// UniqueConstraints defined on the table.
	UniqueConstraints []Unique
}

// PrimaryKey represents a primary key constraint defined on 1 or more columns.
type PrimaryKey struct {
	Name    string
	Columns Columns
}

func (td *BaseTable) GetSchema() string {
	return td.Schema
}

func (td *BaseTable) GetName() string {
	return td.Name
}

func (td *BaseTable) GetColumns() *orderedmap.OrderedMap[string, Column] {
	return td.Columns
}

func (td *BaseTable) GetPrimaryKey() *PrimaryKey {
	return td.PrimaryKey
}

func (td *BaseTable) GetUniqueConstraints() []Unique {
	return td.UniqueConstraints
}

vendor/github.com/uptrace/bun/model_table_has_many.go (generated, vendored, 11 lines changed)

@@ -51,7 +51,7 @@ func (m *hasManyModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error
 	dest := makeDest(m, len(columns))
 
 	var n int
-
+	m.structKey = make([]interface{}, len(m.rel.JoinPKs))
 	for rows.Next() {
 		if m.sliceOfPtr {
 			m.strct = reflect.New(m.table.Type).Elem()
@@ -59,9 +59,8 @@ func (m *hasManyModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error
 			m.strct.Set(m.table.ZeroValue)
 		}
 		m.structInited = false
 
 		m.scanIndex = 0
-		m.structKey = m.structKey[:0]
 		if err := rows.Scan(dest...); err != nil {
 			return 0, err
 		}
@@ -92,9 +91,9 @@ func (m *hasManyModel) Scan(src interface{}) error {
 		return err
 	}
 
-	for _, f := range m.rel.JoinPKs {
-		if f.Name == field.Name {
-			m.structKey = append(m.structKey, indirectFieldValue(field.Value(m.strct)))
+	for i, f := range m.rel.JoinPKs {
+		if f.Name == column {
+			m.structKey[i] = indirectFieldValue(field.Value(m.strct))
 			break
 		}
 	}

vendor/github.com/uptrace/bun/package.json (generated, vendored, 2 lines changed)

@@ -1,6 +1,6 @@
 {
   "name": "gobun",
-  "version": "1.2.5",
+  "version": "1.2.6",
   "main": "index.js",
   "repository": "git@github.com:uptrace/bun.git",
   "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",

vendor/github.com/uptrace/bun/query_base.go (generated, vendored, 112 lines changed)

@@ -6,6 +6,8 @@
 	"database/sql/driver"
 	"errors"
 	"fmt"
+	"strconv"
+	"strings"
 	"time"
 
 	"github.com/uptrace/bun/dialect"
@@ -1352,3 +1354,113 @@ func (ih *idxHintsQuery) bufIndexHint(
 	b = append(b, ")"...)
 	return b, nil
 }
+
+//------------------------------------------------------------------------------
+
+type orderLimitOffsetQuery struct {
+	order []schema.QueryWithArgs
+
+	limit  int32
+	offset int32
+}
+
+func (q *orderLimitOffsetQuery) addOrder(orders ...string) {
+	for _, order := range orders {
+		if order == "" {
+			continue
+		}
+
+		index := strings.IndexByte(order, ' ')
+		if index == -1 {
+			q.order = append(q.order, schema.UnsafeIdent(order))
+			continue
+		}
+
+		field := order[:index]
+		sort := order[index+1:]
+
+		switch strings.ToUpper(sort) {
+		case "ASC", "DESC", "ASC NULLS FIRST", "DESC NULLS FIRST",
+			"ASC NULLS LAST", "DESC NULLS LAST":
+			q.order = append(q.order, schema.SafeQuery("? ?", []interface{}{
+				Ident(field),
+				Safe(sort),
+			}))
+		default:
+			q.order = append(q.order, schema.UnsafeIdent(order))
+		}
+	}
+}
+
+func (q *orderLimitOffsetQuery) addOrderExpr(query string, args ...interface{}) {
+	q.order = append(q.order, schema.SafeQuery(query, args))
+}
+
+func (q *orderLimitOffsetQuery) appendOrder(fmter schema.Formatter, b []byte) (_ []byte, err error) {
+	if len(q.order) > 0 {
+		b = append(b, " ORDER BY "...)
+
+		for i, f := range q.order {
+			if i > 0 {
+				b = append(b, ", "...)
+			}
+			b, err = f.AppendQuery(fmter, b)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		return b, nil
+	}
+
+	// MSSQL: allows Limit() without Order() as per https://stackoverflow.com/a/36156953
+	if q.limit > 0 && fmter.Dialect().Name() == dialect.MSSQL {
+		return append(b, " ORDER BY _temp_sort"...), nil
+	}
+
+	return b, nil
+}
+
+func (q *orderLimitOffsetQuery) setLimit(n int) {
+	q.limit = int32(n)
+}
+
+func (q *orderLimitOffsetQuery) setOffset(n int) {
+	q.offset = int32(n)
+}
+
+func (q *orderLimitOffsetQuery) appendLimitOffset(fmter schema.Formatter, b []byte) (_ []byte, err error) {
+	if fmter.Dialect().Features().Has(feature.OffsetFetch) {
+		if q.limit > 0 && q.offset > 0 {
+			b = append(b, " OFFSET "...)
+			b = strconv.AppendInt(b, int64(q.offset), 10)
+			b = append(b, " ROWS"...)
+
+			b = append(b, " FETCH NEXT "...)
+			b = strconv.AppendInt(b, int64(q.limit), 10)
+			b = append(b, " ROWS ONLY"...)
+		} else if q.limit > 0 {
+			b = append(b, " OFFSET 0 ROWS"...)
+
+			b = append(b, " FETCH NEXT "...)
+			b = strconv.AppendInt(b, int64(q.limit), 10)
+			b = append(b, " ROWS ONLY"...)
+		} else if q.offset > 0 {
+			b = append(b, " OFFSET "...)
+			b = strconv.AppendInt(b, int64(q.offset), 10)
+			b = append(b, " ROWS"...)
+		}
+	} else {
+		if q.limit > 0 {
+			b = append(b, " LIMIT "...)
+			b = strconv.AppendInt(b, int64(q.limit), 10)
+		}
+		if q.offset > 0 {
+			b = append(b, " OFFSET "...)
+			b = strconv.AppendInt(b, int64(q.offset), 10)
+		}
+	}
+
+	return b, nil
+}

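The addOrder whitelist above decides whether an Order() argument is split into identifier plus raw sort suffix, or quoted whole as one identifier. A standalone sketch of that decision (classifyOrder is a hypothetical helper; the switch mirrors the vendored code):

package main

import (
	"fmt"
	"strings"
)

// classifyOrder mimics orderLimitOffsetQuery.addOrder: only a bare column
// name followed by a recognized sort suffix is split; everything else is
// treated as a single identifier.
func classifyOrder(order string) string {
	index := strings.IndexByte(order, ' ')
	if index == -1 {
		return "ident: " + order
	}
	switch strings.ToUpper(order[index+1:]) {
	case "ASC", "DESC", "ASC NULLS FIRST", "DESC NULLS FIRST",
		"ASC NULLS LAST", "DESC NULLS LAST":
		return "ident+sort: " + order[:index] + " | " + order[index+1:]
	default:
		return "ident: " + order
	}
}

func main() {
	fmt.Println(classifyOrder("name"))               // ident: name
	fmt.Println(classifyOrder("name desc"))          // ident+sort: name | desc
	fmt.Println(classifyOrder("ts DESC NULLS LAST")) // ident+sort: ts | DESC NULLS LAST
}
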
vendor/github.com/uptrace/bun/query_column_add.go (generated, vendored, 7 lines changed)

@@ -42,9 +42,12 @@ func (q *AddColumnQuery) Err(err error) *AddColumnQuery {
 	return q
 }
 
-func (q *AddColumnQuery) Apply(fn func(*AddColumnQuery) *AddColumnQuery) *AddColumnQuery {
-	if fn != nil {
-		return fn(q)
+// Apply calls each function in fns, passing the AddColumnQuery as an argument.
+func (q *AddColumnQuery) Apply(fns ...func(*AddColumnQuery) *AddColumnQuery) *AddColumnQuery {
+	for _, fn := range fns {
+		if fn != nil {
+			q = fn(q)
+		}
 	}
 	return q
 }

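Apply is now variadic across the query builders touched by this commit, so reusable filters compose in one call; nil entries are skipped. A sketch with hypothetical filters on SelectQuery, where the same change also lands below:

package example

import "github.com/uptrace/bun"

// paginate returns a reusable filter; page and size are illustrative.
func paginate(page, size int) func(*bun.SelectQuery) *bun.SelectQuery {
	return func(q *bun.SelectQuery) *bun.SelectQuery {
		return q.Limit(size).Offset(page * size)
	}
}

func onlyActive(q *bun.SelectQuery) *bun.SelectQuery {
	return q.Where("active = TRUE")
}

// Usage: db.NewSelect().Model(&users).Apply(onlyActive, paginate(2, 50))
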
vendor/github.com/uptrace/bun/query_column_drop.go (generated, vendored, 7 lines changed)

@@ -40,9 +40,12 @@ func (q *DropColumnQuery) Err(err error) *DropColumnQuery {
 	return q
 }
 
-func (q *DropColumnQuery) Apply(fn func(*DropColumnQuery) *DropColumnQuery) *DropColumnQuery {
-	if fn != nil {
-		return fn(q)
+// Apply calls each function in fns, passing the DropColumnQuery as an argument.
+func (q *DropColumnQuery) Apply(fns ...func(*DropColumnQuery) *DropColumnQuery) *DropColumnQuery {
+	for _, fn := range fns {
+		if fn != nil {
+			q = fn(q)
+		}
 	}
 	return q
 }

vendor/github.com/uptrace/bun/query_delete.go (generated, vendored, 61 lines changed)

@@ -3,6 +3,7 @@
 import (
 	"context"
 	"database/sql"
+	"errors"
 	"time"
 
 	"github.com/uptrace/bun/dialect/feature"
@@ -12,6 +13,7 @@
 
 type DeleteQuery struct {
 	whereBaseQuery
+	orderLimitOffsetQuery
 	returningQuery
 }
 
@@ -44,10 +46,12 @@ func (q *DeleteQuery) Err(err error) *DeleteQuery {
 	return q
 }
 
-// Apply calls the fn passing the DeleteQuery as an argument.
-func (q *DeleteQuery) Apply(fn func(*DeleteQuery) *DeleteQuery) *DeleteQuery {
-	if fn != nil {
-		return fn(q)
+// Apply calls each function in fns, passing the DeleteQuery as an argument.
+func (q *DeleteQuery) Apply(fns ...func(*DeleteQuery) *DeleteQuery) *DeleteQuery {
+	for _, fn := range fns {
+		if fn != nil {
+			q = fn(q)
+		}
 	}
 	return q
 }
@@ -120,17 +124,50 @@ func (q *DeleteQuery) WhereAllWithDeleted() *DeleteQuery {
 	return q
 }
 
+func (q *DeleteQuery) Order(orders ...string) *DeleteQuery {
+	if !q.hasFeature(feature.DeleteOrderLimit) {
+		q.err = errors.New("bun: order is not supported for current dialect")
+		return q
+	}
+	q.addOrder(orders...)
+	return q
+}
+
+func (q *DeleteQuery) OrderExpr(query string, args ...interface{}) *DeleteQuery {
+	if !q.hasFeature(feature.DeleteOrderLimit) {
+		q.err = errors.New("bun: order is not supported for current dialect")
+		return q
+	}
+	q.addOrderExpr(query, args...)
+	return q
+}
+
 func (q *DeleteQuery) ForceDelete() *DeleteQuery {
 	q.flags = q.flags.Set(forceDeleteFlag)
 	return q
 }
 
+// ------------------------------------------------------------------------------
+func (q *DeleteQuery) Limit(n int) *DeleteQuery {
+	if !q.hasFeature(feature.DeleteOrderLimit) {
+		q.err = errors.New("bun: limit is not supported for current dialect")
+		return q
+	}
+	q.setLimit(n)
+	return q
+}
+
 //------------------------------------------------------------------------------
 
 // Returning adds a RETURNING clause to the query.
 //
 // To suppress the auto-generated RETURNING clause, use `Returning("NULL")`.
 func (q *DeleteQuery) Returning(query string, args ...interface{}) *DeleteQuery {
+	if !q.hasFeature(feature.DeleteReturning) {
+		q.err = errors.New("bun: returning is not supported for current dialect")
+		return q
+	}
+
 	q.addReturning(schema.SafeQuery(query, args))
 	return q
 }
@@ -203,7 +240,21 @@ func (q *DeleteQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
 		return nil, err
 	}
 
-	if q.hasFeature(feature.Returning) && q.hasReturning() {
+	if q.hasMultiTables() && (len(q.order) > 0 || q.limit > 0) {
+		return nil, errors.New("bun: can't use ORDER or LIMIT with multiple tables")
+	}
+
+	b, err = q.appendOrder(fmter, b)
+	if err != nil {
+		return nil, err
+	}
+
+	b, err = q.appendLimitOffset(fmter, b)
+	if err != nil {
+		return nil, err
+	}
+
+	if q.hasFeature(feature.DeleteReturning) && q.hasReturning() {
 		b = append(b, " RETURNING "...)
 		b, err = q.appendReturning(fmter, b)
 		if err != nil {
@@ -265,7 +316,7 @@ func (q *DeleteQuery) scanOrExec(
 		return nil, err
 	}
 
-	useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.Returning|feature.Output))
+	useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.DeleteReturning|feature.Output))
 	var model Model
 
 	if useScan {

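DeleteQuery gains ORDER BY and LIMIT, gated on feature.DeleteOrderLimit (MySQL-style dialects); elsewhere the builder records a "not supported" error instead of emitting invalid SQL. A sketch with a hypothetical jobs table:

package example

import (
	"context"

	"github.com/uptrace/bun"
)

// pruneJobs deletes the 1000 oldest finished jobs. The table and column
// names are illustrative.
func pruneJobs(ctx context.Context, db *bun.DB) error {
	_, err := db.NewDelete().
		Table("jobs").
		Where("done = TRUE").
		Order("created_at ASC").
		Limit(1000).
		Exec(ctx)
	return err
}
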
vendor/github.com/uptrace/bun/query_insert.go (generated, vendored, 8 lines changed)

@@ -53,10 +53,12 @@ func (q *InsertQuery) Err(err error) *InsertQuery {
 	return q
 }
 
-// Apply calls the fn passing the SelectQuery as an argument.
-func (q *InsertQuery) Apply(fn func(*InsertQuery) *InsertQuery) *InsertQuery {
-	if fn != nil {
-		return fn(q)
+// Apply calls each function in fns, passing the InsertQuery as an argument.
+func (q *InsertQuery) Apply(fns ...func(*InsertQuery) *InsertQuery) *InsertQuery {
+	for _, fn := range fns {
+		if fn != nil {
+			q = fn(q)
+		}
 	}
 	return q
 }

vendor/github.com/uptrace/bun/query_merge.go (generated, vendored, 8 lines changed)

@@ -50,10 +50,12 @@ func (q *MergeQuery) Err(err error) *MergeQuery {
 	return q
 }
 
-// Apply calls the fn passing the MergeQuery as an argument.
-func (q *MergeQuery) Apply(fn func(*MergeQuery) *MergeQuery) *MergeQuery {
-	if fn != nil {
-		return fn(q)
+// Apply calls each function in fns, passing the MergeQuery as an argument.
+func (q *MergeQuery) Apply(fns ...func(*MergeQuery) *MergeQuery) *MergeQuery {
+	for _, fn := range fns {
+		if fn != nil {
+			q = fn(q)
+		}
 	}
 	return q
 }

vendor/github.com/uptrace/bun/query_raw.go (generated, vendored, 9 lines changed)

@@ -96,3 +96,12 @@ func (q *RawQuery) AppendQuery(fmter schema.Formatter, b []byte) ([]byte, error)
 func (q *RawQuery) Operation() string {
 	return "SELECT"
 }
+
+func (q *RawQuery) String() string {
+	buf, err := q.AppendQuery(q.db.Formatter(), nil)
+	if err != nil {
+		panic(err)
+	}
+
+	return string(buf)
+}

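RawQuery now implements String(), rendering the query with the db's formatter. Like bun's other String() helpers it panics when formatting fails, so it is best kept to debugging and logging. A sketch:

package example

import (
	"fmt"

	"github.com/uptrace/bun"
)

func debugRaw(db *bun.DB) {
	q := db.NewRaw("SELECT * FROM users WHERE id = ?", 42)
	fmt.Println(q.String()) // e.g. SELECT * FROM users WHERE id = 42
}
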
vendor/github.com/uptrace/bun/query_select.go (generated, vendored, 103 lines changed)

@@ -6,8 +6,6 @@
 	"database/sql"
 	"errors"
 	"fmt"
-	"strconv"
-	"strings"
 	"sync"
 
 	"github.com/uptrace/bun/dialect"
@@ -25,14 +23,12 @@ type union struct {
 type SelectQuery struct {
 	whereBaseQuery
 	idxHintsQuery
+	orderLimitOffsetQuery
 
 	distinctOn []schema.QueryWithArgs
 	joins      []joinQuery
 	group      []schema.QueryWithArgs
 	having     []schema.QueryWithArgs
-	order      []schema.QueryWithArgs
-	limit      int32
-	offset     int32
 	selFor     schema.QueryWithArgs
 
 	union []union
@@ -66,10 +62,12 @@ func (q *SelectQuery) Err(err error) *SelectQuery {
 	return q
 }
 
-// Apply calls the fn passing the SelectQuery as an argument.
-func (q *SelectQuery) Apply(fn func(*SelectQuery) *SelectQuery) *SelectQuery {
-	if fn != nil {
-		return fn(q)
+// Apply calls each function in fns, passing the SelectQuery as an argument.
+func (q *SelectQuery) Apply(fns ...func(*SelectQuery) *SelectQuery) *SelectQuery {
+	for _, fn := range fns {
+		if fn != nil {
+			q = fn(q)
+		}
 	}
 	return q
 }
@@ -279,46 +277,22 @@ func (q *SelectQuery) Having(having string, args ...interface{}) *SelectQuery {
 }
 
 func (q *SelectQuery) Order(orders ...string) *SelectQuery {
-	for _, order := range orders {
-		if order == "" {
-			continue
-		}
-
-		index := strings.IndexByte(order, ' ')
-		if index == -1 {
-			q.order = append(q.order, schema.UnsafeIdent(order))
-			continue
-		}
-
-		field := order[:index]
-		sort := order[index+1:]
-
-		switch strings.ToUpper(sort) {
-		case "ASC", "DESC", "ASC NULLS FIRST", "DESC NULLS FIRST",
-			"ASC NULLS LAST", "DESC NULLS LAST":
-			q.order = append(q.order, schema.SafeQuery("? ?", []interface{}{
-				Ident(field),
-				Safe(sort),
-			}))
-		default:
-			q.order = append(q.order, schema.UnsafeIdent(order))
-		}
-	}
+	q.addOrder(orders...)
 	return q
 }
 
 func (q *SelectQuery) OrderExpr(query string, args ...interface{}) *SelectQuery {
-	q.order = append(q.order, schema.SafeQuery(query, args))
+	q.addOrderExpr(query, args...)
 	return q
 }
 
 func (q *SelectQuery) Limit(n int) *SelectQuery {
-	q.limit = int32(n)
+	q.setLimit(n)
 	return q
 }
 
 func (q *SelectQuery) Offset(n int) *SelectQuery {
-	q.offset = int32(n)
+	q.setOffset(n)
 	return q
 }
 
@@ -615,35 +589,9 @@ func (q *SelectQuery) appendQuery(
 		return nil, err
 	}
 
-	if fmter.Dialect().Features().Has(feature.OffsetFetch) {
-		if q.limit > 0 && q.offset > 0 {
-			b = append(b, " OFFSET "...)
-			b = strconv.AppendInt(b, int64(q.offset), 10)
-			b = append(b, " ROWS"...)
-
-			b = append(b, " FETCH NEXT "...)
-			b = strconv.AppendInt(b, int64(q.limit), 10)
-			b = append(b, " ROWS ONLY"...)
-		} else if q.limit > 0 {
-			b = append(b, " OFFSET 0 ROWS"...)
-
-			b = append(b, " FETCH NEXT "...)
-			b = strconv.AppendInt(b, int64(q.limit), 10)
-			b = append(b, " ROWS ONLY"...)
-		} else if q.offset > 0 {
-			b = append(b, " OFFSET "...)
-			b = strconv.AppendInt(b, int64(q.offset), 10)
-			b = append(b, " ROWS"...)
-		}
-	} else {
-		if q.limit > 0 {
-			b = append(b, " LIMIT "...)
-			b = strconv.AppendInt(b, int64(q.limit), 10)
-		}
-		if q.offset > 0 {
-			b = append(b, " OFFSET "...)
-			b = strconv.AppendInt(b, int64(q.offset), 10)
-		}
+	b, err = q.appendLimitOffset(fmter, b)
+	if err != nil {
+		return nil, err
 	}
 
 	if !q.selFor.IsZero() {
@@ -782,31 +730,6 @@ func (q *SelectQuery) appendTables(fmter schema.Formatter, b []byte) (_ []byte,
 	return q.appendTablesWithAlias(fmter, b)
 }
 
-func (q *SelectQuery) appendOrder(fmter schema.Formatter, b []byte) (_ []byte, err error) {
-	if len(q.order) > 0 {
-		b = append(b, " ORDER BY "...)
-
-		for i, f := range q.order {
-			if i > 0 {
-				b = append(b, ", "...)
-			}
-			b, err = f.AppendQuery(fmter, b)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		return b, nil
-	}
-
-	// MSSQL: allows Limit() without Order() as per https://stackoverflow.com/a/36156953
-	if q.limit > 0 && fmter.Dialect().Name() == dialect.MSSQL {
-		return append(b, " ORDER BY _temp_sort"...), nil
-	}
-
-	return b, nil
-}
-
 //------------------------------------------------------------------------------
 
 func (q *SelectQuery) Rows(ctx context.Context) (*sql.Rows, error) {

vendor/github.com/uptrace/bun/query_table_drop.go (generated, vendored, 9 lines changed)

@@ -151,3 +151,12 @@ func (q *DropTableQuery) afterDropTableHook(ctx context.Context) error {
 	}
 	return nil
 }
+
+func (q *DropTableQuery) String() string {
+	buf, err := q.AppendQuery(q.db.Formatter(), nil)
+	if err != nil {
+		panic(err)
+	}
+
+	return string(buf)
+}

vendor/github.com/uptrace/bun/query_update.go (generated, vendored, 47 lines changed)

@@ -15,6 +15,7 @@
 
 type UpdateQuery struct {
 	whereBaseQuery
+	orderLimitOffsetQuery
 	returningQuery
 	customValueQuery
 	setQuery
@@ -53,10 +54,12 @@ func (q *UpdateQuery) Err(err error) *UpdateQuery {
 	return q
 }
 
-// Apply calls the fn passing the SelectQuery as an argument.
-func (q *UpdateQuery) Apply(fn func(*UpdateQuery) *UpdateQuery) *UpdateQuery {
-	if fn != nil {
-		return fn(q)
+// Apply calls each function in fns, passing the UpdateQuery as an argument.
+func (q *UpdateQuery) Apply(fns ...func(*UpdateQuery) *UpdateQuery) *UpdateQuery {
+	for _, fn := range fns {
+		if fn != nil {
+			q = fn(q)
+		}
 	}
 	return q
 }
@@ -200,6 +203,34 @@ func (q *UpdateQuery) WhereAllWithDeleted() *UpdateQuery {
 	return q
 }
 
+// ------------------------------------------------------------------------------
+func (q *UpdateQuery) Order(orders ...string) *UpdateQuery {
+	if !q.hasFeature(feature.UpdateOrderLimit) {
+		q.err = errors.New("bun: order is not supported for current dialect")
+		return q
+	}
+	q.addOrder(orders...)
+	return q
+}
+
+func (q *UpdateQuery) OrderExpr(query string, args ...interface{}) *UpdateQuery {
+	if !q.hasFeature(feature.UpdateOrderLimit) {
+		q.err = errors.New("bun: order is not supported for current dialect")
+		return q
+	}
+	q.addOrderExpr(query, args...)
+	return q
+}
+
+func (q *UpdateQuery) Limit(n int) *UpdateQuery {
+	if !q.hasFeature(feature.UpdateOrderLimit) {
+		q.err = errors.New("bun: limit is not supported for current dialect")
+		return q
+	}
+	q.setLimit(n)
+	return q
+}
+
 //------------------------------------------------------------------------------
 
 // Returning adds a RETURNING clause to the query.
@@ -278,6 +309,16 @@ func (q *UpdateQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
 		return nil, err
 	}
 
+	b, err = q.appendOrder(fmter, b)
+	if err != nil {
+		return nil, err
+	}
+
+	b, err = q.appendLimitOffset(fmter, b)
+	if err != nil {
+		return nil, err
+	}
+
 	if q.hasFeature(feature.Returning) && q.hasReturning() {
 		b = append(b, " RETURNING "...)
 		b, err = q.appendReturning(fmter, b)

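UpdateQuery gets the same ORDER BY / LIMIT pair, gated on feature.UpdateOrderLimit, which enables MySQL-style batched updates. A sketch with a hypothetical jobs table:

package example

import (
	"context"

	"github.com/uptrace/bun"
)

// markBatch flags at most 100 rows per call, oldest first.
func markBatch(ctx context.Context, db *bun.DB) (int64, error) {
	res, err := db.NewUpdate().
		Table("jobs").
		Set("done = TRUE").
		Where("done = FALSE").
		Order("id ASC").
		Limit(100).
		Exec(ctx)
	if err != nil {
		return 0, err
	}
	return res.RowsAffected()
}
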
|
7
vendor/github.com/uptrace/bun/schema/dialect.go
generated
vendored
7
vendor/github.com/uptrace/bun/schema/dialect.go
generated
vendored
|
@ -39,6 +39,9 @@ type Dialect interface {
|
||||||
// is mandatory in queries that modify the schema (CREATE TABLE / ADD COLUMN, etc).
|
// is mandatory in queries that modify the schema (CREATE TABLE / ADD COLUMN, etc).
|
||||||
// Dialects that do not have such requirement may return 0, which should be interpreted so by the caller.
|
// Dialects that do not have such requirement may return 0, which should be interpreted so by the caller.
|
||||||
DefaultVarcharLen() int
|
DefaultVarcharLen() int
|
||||||
|
|
||||||
|
// DefaultSchema should returns the name of the default database schema.
|
||||||
|
DefaultSchema() string
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------------
|
// ------------------------------------------------------------------------------
|
||||||
|
@ -185,3 +188,7 @@ func (d *nopDialect) DefaultVarcharLen() int {
|
||||||
func (d *nopDialect) AppendSequence(b []byte, _ *Table, _ *Field) []byte {
|
func (d *nopDialect) AppendSequence(b []byte, _ *Table, _ *Field) []byte {
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *nopDialect) DefaultSchema() string {
|
||||||
|
return "nop"
|
||||||
|
}
|
||||||
|
|
vendor/github.com/uptrace/bun/schema/table.go (generated, vendored, 63 lines changed)

@@ -45,6 +45,7 @@ type Table struct {
 	TypeName  string
 	ModelName string
 
+	Schema            string
 	Name              string
 	SQLName           Safe
 	SQLNameForSelects Safe
@@ -85,6 +86,7 @@ func (table *Table) init(dialect Dialect, typ reflect.Type, canAddr bool) {
 	table.setName(tableName)
 	table.Alias = table.ModelName
 	table.SQLAlias = table.quoteIdent(table.ModelName)
+	table.Schema = dialect.DefaultSchema()
 
 	table.Fields = make([]*Field, 0, typ.NumField())
 	table.FieldMap = make(map[string]*Field, typ.NumField())
@@ -244,6 +246,31 @@ type embeddedField struct {
 			subfield.SQLName = t.quoteIdent(subfield.Name)
 		}
 		t.addField(subfield)
+		if v, ok := subfield.Tag.Options["unique"]; ok {
+			t.addUnique(subfield, embfield.prefix, v)
+		}
+	}
+}
+
+func (t *Table) addUnique(field *Field, prefix string, tagOptions []string) {
+	var names []string
+	if len(tagOptions) == 1 {
+		// Split the value by comma, this will allow multiple names to be specified.
+		// We can use this to create multiple named unique constraints where a single column
+		// might be included in multiple constraints.
+		names = strings.Split(tagOptions[0], ",")
+	} else {
+		names = tagOptions
+	}
+
+	for _, uname := range names {
+		if t.Unique == nil {
+			t.Unique = make(map[string][]*Field)
+		}
+		if uname != "" && prefix != "" {
+			uname = prefix + uname
+		}
+		t.Unique[uname] = append(t.Unique[uname], field)
 	}
 }
 
@@ -371,10 +398,18 @@ func (t *Table) processBaseModelField(f reflect.StructField) {
 	}
 
 	if tag.Name != "" {
+		schema, _ := t.schemaFromTagName(tag.Name)
+		t.Schema = schema
+
+		// Eventually, we should only assign the "table" portion as the table name,
+		// which will also require a change in how the table name is appended to queries.
+		// Until that is done, set table name to tag.Name.
 		t.setName(tag.Name)
 	}
 
 	if s, ok := tag.Option("table"); ok {
+		schema, _ := t.schemaFromTagName(s)
+		t.Schema = schema
 		t.setName(s)
 	}
 
@@ -388,6 +423,17 @@ func (t *Table) processBaseModelField(f reflect.StructField) {
 	}
 }
 
+// schemaFromTagName splits the bun.BaseModel tag name into schema and table name
+// in case it is specified in the "schema"."table" format.
+// Assume default schema if one isn't explicitly specified.
+func (t *Table) schemaFromTagName(name string) (string, string) {
+	schema, table := t.dialect.DefaultSchema(), name
+	if schemaTable := strings.Split(name, "."); len(schemaTable) == 2 {
+		schema, table = schemaTable[0], schemaTable[1]
+	}
+	return schema, table
+}
+
 // nolint
 func (t *Table) newField(sf reflect.StructField, tag tagparser.Tag) *Field {
 	sqlName := internal.Underscore(sf.Name)
@@ -439,22 +485,7 @@ func (t *Table) newField(sf reflect.StructField, tag tagparser.Tag) *Field {
 	}
 
 	if v, ok := tag.Options["unique"]; ok {
-		var names []string
-		if len(v) == 1 {
-			// Split the value by comma, this will allow multiple names to be specified.
-			// We can use this to create multiple named unique constraints where a single column
-			// might be included in multiple constraints.
-			names = strings.Split(v[0], ",")
-		} else {
-			names = v
-		}
-
-		for _, uniqueName := range names {
-			if t.Unique == nil {
-				t.Unique = make(map[string][]*Field)
-			}
-			t.Unique[uniqueName] = append(t.Unique[uniqueName], field)
-		}
+		t.addUnique(field, "", v)
 	}
 	if s, ok := tag.Option("default"); ok {
 		field.SQLDefault = s

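The new Schema field and addUnique helper are driven entirely by struct tags. A sketch of a model that exercises both: a schema-qualified table name (which now populates Table.Schema) and a named unique group shared by two columns; all names are illustrative:

package example

import "github.com/uptrace/bun"

type Book struct {
	bun.BaseModel `bun:"table:favourite.books"`

	ID     int64  `bun:",pk,autoincrement"`
	Title  string `bun:"title,unique:title_author"`
	Author string `bun:"author,unique:title_author"`
}
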
vendor/github.com/uptrace/bun/schema/tables.go (generated, vendored, 12 lines changed)

@@ -77,6 +77,7 @@ func (t *Tables) InProgress(typ reflect.Type) *Table {
 	return table
 }
 
+// ByModel gets the table by its Go name.
 func (t *Tables) ByModel(name string) *Table {
 	var found *Table
 	t.tables.Range(func(typ reflect.Type, table *Table) bool {
@@ -89,6 +90,7 @@ func (t *Tables) ByModel(name string) *Table {
 	return found
 }
 
+// ByName gets the table by its SQL name.
 func (t *Tables) ByName(name string) *Table {
 	var found *Table
 	t.tables.Range(func(typ reflect.Type, table *Table) bool {
@@ -100,3 +102,13 @@ func (t *Tables) ByName(name string) *Table {
 	})
 	return found
 }
+
+// All returns all registered tables.
+func (t *Tables) All() []*Table {
+	var found []*Table
+	t.tables.Range(func(typ reflect.Type, table *Table) bool {
+		found = append(found, table)
+		return true
+	})
+	return found
+}

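Tables.All is what BunModelInspector iterates in inspector.go above. A small sketch listing every registered model together with its new Schema field:

package example

import (
	"fmt"

	"github.com/uptrace/bun"
)

func listTables(db *bun.DB) {
	for _, t := range db.Dialect().Tables().All() {
		fmt.Println(t.Schema, t.Name)
	}
}
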
vendor/github.com/uptrace/bun/version.go (generated, vendored, 2 lines changed)

@@ -2,5 +2,5 @@
 
 // Version is the current release version.
 func Version() string {
-	return "1.2.5"
+	return "1.2.6"
 }

vendor/github.com/wk8/go-ordered-map/v2/.gitignore (generated, vendored, new file, 1 line)

@@ -0,0 +1 @@
/vendor/

vendor/github.com/wk8/go-ordered-map/v2/.golangci.yml (generated, vendored, new file, 78 lines)

@@ -0,0 +1,78 @@
run:
  tests: false

linters:
  disable-all: true
  enable:
    - asciicheck
    - bidichk
    - bodyclose
    - containedctx
    - contextcheck
    - decorder
    # Disabling depguard as there is no guarded list of imports
    # - depguard
    - dogsled
    - dupl
    - durationcheck
    - errcheck
    - errchkjson
    - errname
    - errorlint
    - exportloopref
    - forbidigo
    - funlen
    # Don't need gci and goimports
    # - gci
    - gochecknoglobals
    - gochecknoinits
    - gocognit
    - goconst
    - gocritic
    - gocyclo
    - godox
    - gofmt
    - gofumpt
    - goheader
    - goimports
    - mnd
    - gomoddirectives
    - gomodguard
    - goprintffuncname
    - gosec
    - gosimple
    - govet
    - grouper
    - importas
    - ineffassign
    - lll
    - maintidx
    - makezero
    - misspell
    - nakedret
    - nilerr
    - nilnil
    - noctx
    - nolintlint
    - paralleltest
    - prealloc
    - predeclared
    - promlinter
    - revive
    - rowserrcheck
    - sqlclosecheck
    - staticcheck
    - stylecheck
    - tagliatelle
    - tenv
    - testpackage
    - thelper
    - tparallel
    # FIXME: doesn't support 1.23 yet
    # - typecheck
    - unconvert
    - unparam
    - unused
    - varnamelen
    - wastedassign
    - whitespace

vendor/github.com/wk8/go-ordered-map/v2/CHANGELOG.md (generated, vendored, new file, 38 lines)

@@ -0,0 +1,38 @@
# Changelog

[comment]: # (Changes since last release go here)

## 2.1.8 - Jun 27th 2023

* Added support for YAML serialization/deserialization

## 2.1.7 - Apr 13th 2023

* Renamed test_utils.go to utils_test.go

## 2.1.6 - Feb 15th 2023

* Added `GetAndMoveToBack()` and `GetAndMoveToFront()` methods

## 2.1.5 - Dec 13th 2022

* Added `Value()` method

## 2.1.4 - Dec 12th 2022

* Fixed a bug with UTF-8 special characters in JSON keys

## 2.1.3 - Dec 11th 2022

* Added support for JSON marshalling/unmarshalling of wrapper of primitive types

## 2.1.2 - Dec 10th 2022
* Allowing to pass options to `New`, to give a capacity hint, or initial data
* Allowing to deserialize nested ordered maps from JSON without having to explicitly instantiate them
* Added the `AddPairs` method

## 2.1.1 - Dec 9th 2022
* Fixing a bug with JSON marshalling

## 2.1.0 - Dec 7th 2022
* Added support for JSON serialization/deserialization

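go-ordered-map is the new dependency backing the ordered Tables/Columns maps in sqlschema above. A minimal sketch of its core behaviour, insertion-ordered iteration and JSON round-tripping (per the changelog entries):

package main

import (
	"encoding/json"
	"fmt"

	orderedmap "github.com/wk8/go-ordered-map/v2"
)

func main() {
	om := orderedmap.New[string, int]()
	om.Set("b", 2)
	om.Set("a", 1)

	// Pairs come back in insertion order, not key order.
	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		fmt.Println(pair.Key, pair.Value) // b 2, then a 1
	}

	out, _ := json.Marshal(om)
	fmt.Println(string(out)) // {"b":2,"a":1}
}
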
vendor/github.com/wk8/go-ordered-map/v2/LICENSE (generated, vendored, new file, 201 lines)

@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner}
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
32 vendor/github.com/wk8/go-ordered-map/v2/Makefile generated vendored Normal file
@@ -0,0 +1,32 @@
.DEFAULT_GOAL := all

.PHONY: all
all: test_with_fuzz lint

# the TEST_FLAGS env var can be set to eg run only specific tests
TEST_COMMAND = go test -v -count=1 -race -cover $(TEST_FLAGS)

.PHONY: test
test:
	$(TEST_COMMAND)

.PHONY: bench
bench:
	go test -bench=.

FUZZ_TIME ?= 10s

# see https://github.com/golang/go/issues/46312
# and https://stackoverflow.com/a/72673487/4867444
# if we end up having more fuzz tests
.PHONY: test_with_fuzz
test_with_fuzz:
	$(TEST_COMMAND) -fuzz=FuzzRoundTripJSON -fuzztime=$(FUZZ_TIME)
	$(TEST_COMMAND) -fuzz=FuzzRoundTripYAML -fuzztime=$(FUZZ_TIME)

.PHONY: fuzz
fuzz: test_with_fuzz
207 vendor/github.com/wk8/go-ordered-map/v2/README.md generated vendored Normal file
@@ -0,0 +1,207 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/wk8/go-ordered-map/v2.svg)](https://pkg.go.dev/github.com/wk8/go-ordered-map/v2)
[![Build Status](https://circleci.com/gh/wk8/go-ordered-map.svg?style=svg)](https://app.circleci.com/pipelines/github/wk8/go-ordered-map)

# Golang Ordered Maps

Same as regular maps, but also remembers the order in which keys were inserted, akin to [Python's `collections.OrderedDict`s](https://docs.python.org/3.7/library/collections.html#ordereddict-objects).

It offers the following features:
* optimal runtime performance (all operations are constant time)
* optimal memory usage (only one copy of values, no unnecessary memory allocation)
* iteration from either the oldest or the newest key, without memory copies; iterations can be stopped early with `break`, and run in time linear to the number of keys actually iterated over rather than the total length of the ordered map
* generic key and value types. If you're running Go < 1.18, you can use [version 1](https://github.com/wk8/go-ordered-map/tree/v1), which takes and returns `interface{}`s instead of using generics
* an idiomatic API, akin to that of [`container/list`](https://golang.org/pkg/container/list)
* support for JSON and YAML marshalling

## Documentation

[The full documentation is available on pkg.go.dev](https://pkg.go.dev/github.com/wk8/go-ordered-map/v2).

## Installation
```bash
go get -u github.com/wk8/go-ordered-map/v2
```

Or use your favorite golang vendoring tool!

## Supported go versions

Go >= 1.23 is required to use version >= 2.2.0 of this library, as it uses generics and iterators.

If you're running Go < 1.23, you can use [version 2.1.8](https://github.com/wk8/go-ordered-map/tree/v2.1.8) instead.

If you're running Go < 1.18, you can use [version 1](https://github.com/wk8/go-ordered-map/tree/v1) instead.

## Example / usage

```go
package main

import (
	"fmt"

	"github.com/wk8/go-ordered-map/v2"
)

func main() {
	om := orderedmap.New[string, string]()

	om.Set("foo", "bar")
	om.Set("bar", "baz")
	om.Set("coucou", "toi")

	fmt.Println(om.Get("foo"))          // => "bar", true
	fmt.Println(om.Get("i dont exist")) // => "", false

	// iterating pairs from oldest to newest:
	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		fmt.Printf("%s => %s\n", pair.Key, pair.Value)
	} // prints:
	// foo => bar
	// bar => baz
	// coucou => toi

	// iterating over the 2 newest pairs:
	i := 0
	for pair := om.Newest(); pair != nil; pair = pair.Prev() {
		fmt.Printf("%s => %s\n", pair.Key, pair.Value)
		i++
		if i >= 2 {
			break
		}
	} // prints:
	// coucou => toi
	// bar => baz
}
```

An `OrderedMap`'s keys must implement `comparable`, and its values can be anything, for example:

```go
type myStruct struct {
	payload string
}

func main() {
	om := orderedmap.New[int, *myStruct]()

	om.Set(12, &myStruct{"foo"})
	om.Set(1, &myStruct{"bar"})

	value, present := om.Get(12)
	if !present {
		panic("should be there!")
	}
	fmt.Println(value.payload) // => foo

	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		fmt.Printf("%d => %s\n", pair.Key, pair.Value.payload)
	} // prints:
	// 12 => foo
	// 1 => bar
}
```

It's also worth noting that you can provision ordered maps with a capacity hint, as you would by passing an optional capacity to `make(map[K]V, capacity)`:
```go
om := orderedmap.New[int, *myStruct](28)
```

You can also pass in some initial data to store in the map:
```go
om := orderedmap.New[int, string](orderedmap.WithInitialData[int, string](
	orderedmap.Pair[int, string]{
		Key:   12,
		Value: "foo",
	},
	orderedmap.Pair[int, string]{
		Key:   28,
		Value: "bar",
	},
))
```

`OrderedMap`s also support JSON serialization/deserialization, and preserve insertion order:

```go
// serialization
data, err := json.Marshal(om)
...

// deserialization
om := orderedmap.New[string, string]() // or orderedmap.New[int, any](), or any type you expect
err := json.Unmarshal(data, &om)
...
```

Similarly, it also supports YAML serialization/deserialization using the yaml.v3 package, which also preserves order:

```go
// serialization
data, err := yaml.Marshal(om)
...

// deserialization
om := orderedmap.New[string, string]() // or orderedmap.New[int, any](), or any type you expect
err := yaml.Unmarshal(data, &om)
...
```

## Iterator support (go >= 1.23)

The `FromOldest`, `FromNewest`, `KeysFromOldest`, `KeysFromNewest`, `ValuesFromOldest` and `ValuesFromNewest` methods return iterators over the map's pairs, keys, or values, starting from either the oldest or the newest pair.

For example:

```go
om := orderedmap.New[int, string]()
om.Set(1, "foo")
om.Set(2, "bar")
om.Set(3, "baz")

for k, v := range om.FromOldest() {
	fmt.Printf("%d => %s\n", k, v)
}

// prints:
// 1 => foo
// 2 => bar
// 3 => baz

for k := range om.KeysFromNewest() {
	fmt.Printf("%d\n", k)
}

// prints:
// 3
// 2
// 1
```

`From` is a convenience function that creates a new `OrderedMap` from an iterator over key-value pairs.

```go
om := orderedmap.New[int, string]()
om.Set(1, "foo")
om.Set(2, "bar")
om.Set(3, "baz")

om2 := orderedmap.From(om.FromOldest())

for k, v := range om2.FromOldest() {
	fmt.Printf("%d => %s\n", k, v)
}

// prints:
// 1 => foo
// 2 => bar
// 3 => baz
```

## Alternatives

There are several other ordered map golang implementations out there, but I believe that at the time of writing none of them offer the same functionality as this library; more specifically:
* [iancoleman/orderedmap](https://github.com/iancoleman/orderedmap) only accepts `string` keys, and its `Delete` operations are linear
* [cevaris/ordered_map](https://github.com/cevaris/ordered_map) uses a channel for iterations, and leaks goroutines if the iteration is interrupted before fully traversing the map
* [mantyr/iterator](https://github.com/mantyr/iterator) also uses a channel for iterations, and its `Delete` operations are linear
* [samdolan/go-ordered-map](https://github.com/samdolan/go-ordered-map) adds unnecessary locking (users should add their own locking instead if they need it), its `Delete` and `Get` operations are linear, and iterations trigger a linear memory allocation
182 vendor/github.com/wk8/go-ordered-map/v2/json.go generated vendored Normal file
@@ -0,0 +1,182 @@
package orderedmap

import (
	"bytes"
	"encoding"
	"encoding/json"
	"fmt"
	"reflect"
	"unicode/utf8"

	"github.com/buger/jsonparser"
	"github.com/mailru/easyjson/jwriter"
)

var (
	_ json.Marshaler   = &OrderedMap[int, any]{}
	_ json.Unmarshaler = &OrderedMap[int, any]{}
)

// MarshalJSON implements the json.Marshaler interface.
func (om *OrderedMap[K, V]) MarshalJSON() ([]byte, error) { //nolint:funlen
	if om == nil || om.list == nil {
		return []byte("null"), nil
	}

	writer := jwriter.Writer{}
	writer.RawByte('{')

	for pair, firstIteration := om.Oldest(), true; pair != nil; pair = pair.Next() {
		if firstIteration {
			firstIteration = false
		} else {
			writer.RawByte(',')
		}

		switch key := any(pair.Key).(type) {
		case string:
			writer.String(key)
		case encoding.TextMarshaler:
			writer.RawByte('"')
			writer.Raw(key.MarshalText())
			writer.RawByte('"')
		case int:
			writer.IntStr(key)
		case int8:
			writer.Int8Str(key)
		case int16:
			writer.Int16Str(key)
		case int32:
			writer.Int32Str(key)
		case int64:
			writer.Int64Str(key)
		case uint:
			writer.UintStr(key)
		case uint8:
			writer.Uint8Str(key)
		case uint16:
			writer.Uint16Str(key)
		case uint32:
			writer.Uint32Str(key)
		case uint64:
			writer.Uint64Str(key)
		default:
			// this switch takes care of wrapper types around primitive types, such as
			// type myType string
			switch keyValue := reflect.ValueOf(key); keyValue.Type().Kind() {
			case reflect.String:
				writer.String(keyValue.String())
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				writer.Int64Str(keyValue.Int())
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				writer.Uint64Str(keyValue.Uint())
			default:
				return nil, fmt.Errorf("unsupported key type: %T", key)
			}
		}

		writer.RawByte(':')
		// the error is checked at the end of the function
		writer.Raw(json.Marshal(pair.Value))
	}

	writer.RawByte('}')

	return dumpWriter(&writer)
}

func dumpWriter(writer *jwriter.Writer) ([]byte, error) {
	if writer.Error != nil {
		return nil, writer.Error
	}

	var buf bytes.Buffer
	buf.Grow(writer.Size())
	if _, err := writer.DumpTo(&buf); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
func (om *OrderedMap[K, V]) UnmarshalJSON(data []byte) error {
	if om.list == nil {
		om.initialize(0)
	}

	return jsonparser.ObjectEach(
		data,
		func(keyData []byte, valueData []byte, dataType jsonparser.ValueType, offset int) error {
			if dataType == jsonparser.String {
				// jsonparser removes the enclosing quotes; we need to restore them to make a valid JSON
				valueData = data[offset-len(valueData)-2 : offset]
			}

			var key K
			var value V

			switch typedKey := any(&key).(type) {
			case *string:
				s, err := decodeUTF8(keyData)
				if err != nil {
					return err
				}
				*typedKey = s
			case encoding.TextUnmarshaler:
				if err := typedKey.UnmarshalText(keyData); err != nil {
					return err
				}
			case *int, *int8, *int16, *int32, *int64, *uint, *uint8, *uint16, *uint32, *uint64:
				if err := json.Unmarshal(keyData, typedKey); err != nil {
					return err
				}
			default:
				// this switch takes care of wrapper types around primitive types, such as
				// type myType string
				switch reflect.TypeOf(key).Kind() {
				case reflect.String:
					s, err := decodeUTF8(keyData)
					if err != nil {
						return err
					}

					convertedKeyData := reflect.ValueOf(s).Convert(reflect.TypeOf(key))
					reflect.ValueOf(&key).Elem().Set(convertedKeyData)
				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
					reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
					if err := json.Unmarshal(keyData, &key); err != nil {
						return err
					}
				default:
					return fmt.Errorf("unsupported key type: %T", key)
				}
			}

			if err := json.Unmarshal(valueData, &value); err != nil {
				return err
			}

			om.Set(key, value)
			return nil
		})
}

func decodeUTF8(input []byte) (string, error) {
	remaining, offset := input, 0
	runes := make([]rune, 0, len(remaining))

	for len(remaining) > 0 {
		r, size := utf8.DecodeRune(remaining)
		if r == utf8.RuneError && size <= 1 {
			return "", fmt.Errorf("not a valid UTF-8 string (at position %d): %s", offset, string(input))
		}

		runes = append(runes, r)
		remaining = remaining[size:]
		offset += size
	}

	return string(runes), nil
}
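
One thing the reflect fallbacks in both functions above buy you: defined types over primitives work as JSON map keys without any extra code, even though they match no case in the primary type switches. A minimal, hypothetical usage sketch (not part of this diff; `userID` is an invented name):

```go
package main

import (
	"encoding/json"
	"fmt"

	orderedmap "github.com/wk8/go-ordered-map/v2"
)

// userID is a wrapper type around a primitive; the reflect-based fallbacks
// in MarshalJSON/UnmarshalJSON handle it via its underlying int kind.
type userID int

func main() {
	om := orderedmap.New[userID, string]()
	om.Set(42, "alice")
	om.Set(7, "bob")

	data, err := json.Marshal(om)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // => {"42":"alice","7":"bob"} (insertion order preserved)

	om2 := orderedmap.New[userID, string]()
	if err := json.Unmarshal(data, om2); err != nil {
		panic(err)
	}
	name, _ := om2.Get(42)
	fmt.Println(name) // => alice
}
```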
373 vendor/github.com/wk8/go-ordered-map/v2/orderedmap.go generated vendored Normal file
@@ -0,0 +1,373 @@
// Package orderedmap implements an ordered map, i.e. a map that also keeps track of
// the order in which keys were inserted.
//
// All operations are constant-time.
//
// Github repo: https://github.com/wk8/go-ordered-map
package orderedmap

import (
	"fmt"
	"iter"

	list "github.com/bahlo/generic-list-go"
)

type Pair[K comparable, V any] struct {
	Key   K
	Value V

	element *list.Element[*Pair[K, V]]
}

type OrderedMap[K comparable, V any] struct {
	pairs map[K]*Pair[K, V]
	list  *list.List[*Pair[K, V]]
}

type initConfig[K comparable, V any] struct {
	capacity    int
	initialData []Pair[K, V]
}

type InitOption[K comparable, V any] func(config *initConfig[K, V])

// WithCapacity allows giving a capacity hint for the map, akin to the standard make(map[K]V, capacity).
func WithCapacity[K comparable, V any](capacity int) InitOption[K, V] {
	return func(c *initConfig[K, V]) {
		c.capacity = capacity
	}
}

// WithInitialData allows passing in initial data for the map.
func WithInitialData[K comparable, V any](initialData ...Pair[K, V]) InitOption[K, V] {
	return func(c *initConfig[K, V]) {
		c.initialData = initialData
		if c.capacity < len(initialData) {
			c.capacity = len(initialData)
		}
	}
}

// New creates a new OrderedMap.
// options can either be one or several InitOption[K, V], or a single integer,
// which is then interpreted as a capacity hint, à la make(map[K]V, capacity).
func New[K comparable, V any](options ...any) *OrderedMap[K, V] {
	orderedMap := &OrderedMap[K, V]{}

	var config initConfig[K, V]
	for _, untypedOption := range options {
		switch option := untypedOption.(type) {
		case int:
			if len(options) != 1 {
				invalidOption()
			}
			config.capacity = option

		case InitOption[K, V]:
			option(&config)

		default:
			invalidOption()
		}
	}

	orderedMap.initialize(config.capacity)
	orderedMap.AddPairs(config.initialData...)

	return orderedMap
}

const invalidOptionMessage = `when using orderedmap.New[K,V]() with options, either provide one or several InitOption[K, V]; or a single integer which is then interpreted as a capacity hint, à la make(map[K]V, capacity).` //nolint:lll

func invalidOption() { panic(invalidOptionMessage) }

func (om *OrderedMap[K, V]) initialize(capacity int) {
	om.pairs = make(map[K]*Pair[K, V], capacity)
	om.list = list.New[*Pair[K, V]]()
}

// Get looks for the given key, and returns the value associated with it,
// or V's nil value if not found. The boolean it returns says whether the key is present in the map.
func (om *OrderedMap[K, V]) Get(key K) (val V, present bool) {
	if pair, present := om.pairs[key]; present {
		return pair.Value, true
	}

	return
}

// Load is an alias for Get, mostly to present an API similar to `sync.Map`'s.
func (om *OrderedMap[K, V]) Load(key K) (V, bool) {
	return om.Get(key)
}

// Value returns the value associated with the given key or the zero value.
func (om *OrderedMap[K, V]) Value(key K) (val V) {
	if pair, present := om.pairs[key]; present {
		val = pair.Value
	}
	return
}

// GetPair looks for the given key, and returns the pair associated with it,
// or nil if not found. The Pair struct can then be used to iterate over the ordered map
// from that point, either forward or backward.
func (om *OrderedMap[K, V]) GetPair(key K) *Pair[K, V] {
	return om.pairs[key]
}

// Set sets the key-value pair, and returns what `Get` would have returned
// on that key prior to the call to `Set`.
func (om *OrderedMap[K, V]) Set(key K, value V) (val V, present bool) {
	if pair, present := om.pairs[key]; present {
		oldValue := pair.Value
		pair.Value = value
		return oldValue, true
	}

	pair := &Pair[K, V]{
		Key:   key,
		Value: value,
	}
	pair.element = om.list.PushBack(pair)
	om.pairs[key] = pair

	return
}

// AddPairs allows setting multiple pairs at a time. It's equivalent to calling
// Set on each pair sequentially.
func (om *OrderedMap[K, V]) AddPairs(pairs ...Pair[K, V]) {
	for _, pair := range pairs {
		om.Set(pair.Key, pair.Value)
	}
}

// Store is an alias for Set, mostly to present an API similar to `sync.Map`'s.
func (om *OrderedMap[K, V]) Store(key K, value V) (V, bool) {
	return om.Set(key, value)
}

// Delete removes the key-value pair, and returns what `Get` would have returned
// on that key prior to the call to `Delete`.
func (om *OrderedMap[K, V]) Delete(key K) (val V, present bool) {
	if pair, present := om.pairs[key]; present {
		om.list.Remove(pair.element)
		delete(om.pairs, key)
		return pair.Value, true
	}
	return
}

// Len returns the length of the ordered map.
func (om *OrderedMap[K, V]) Len() int {
	if om == nil || om.pairs == nil {
		return 0
	}
	return len(om.pairs)
}

// Oldest returns a pointer to the oldest pair. It's meant to be used to iterate on the ordered map's
// pairs from the oldest to the newest, e.g.:
// for pair := orderedMap.Oldest(); pair != nil; pair = pair.Next() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) }
func (om *OrderedMap[K, V]) Oldest() *Pair[K, V] {
	if om == nil || om.list == nil {
		return nil
	}
	return listElementToPair(om.list.Front())
}

// Newest returns a pointer to the newest pair. It's meant to be used to iterate on the ordered map's
// pairs from the newest to the oldest, e.g.:
// for pair := orderedMap.Newest(); pair != nil; pair = pair.Prev() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) }
func (om *OrderedMap[K, V]) Newest() *Pair[K, V] {
	if om == nil || om.list == nil {
		return nil
	}
	return listElementToPair(om.list.Back())
}

// Next returns a pointer to the next pair.
func (p *Pair[K, V]) Next() *Pair[K, V] {
	return listElementToPair(p.element.Next())
}

// Prev returns a pointer to the previous pair.
func (p *Pair[K, V]) Prev() *Pair[K, V] {
	return listElementToPair(p.element.Prev())
}

func listElementToPair[K comparable, V any](element *list.Element[*Pair[K, V]]) *Pair[K, V] {
	if element == nil {
		return nil
	}
	return element.Value
}

// KeyNotFoundError may be returned by functions in this package when they're called with keys that are not present
// in the map.
type KeyNotFoundError[K comparable] struct {
	MissingKey K
}

func (e *KeyNotFoundError[K]) Error() string {
	return fmt.Sprintf("missing key: %v", e.MissingKey)
}

// MoveAfter moves the value associated with key to its new position after the one associated with markKey.
// Returns an error iff key or markKey are not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveAfter(key, markKey K) error {
	elements, err := om.getElements(key, markKey)
	if err != nil {
		return err
	}
	om.list.MoveAfter(elements[0], elements[1])
	return nil
}

// MoveBefore moves the value associated with key to its new position before the one associated with markKey.
// Returns an error iff key or markKey are not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveBefore(key, markKey K) error {
	elements, err := om.getElements(key, markKey)
	if err != nil {
		return err
	}
	om.list.MoveBefore(elements[0], elements[1])
	return nil
}

func (om *OrderedMap[K, V]) getElements(keys ...K) ([]*list.Element[*Pair[K, V]], error) {
	elements := make([]*list.Element[*Pair[K, V]], len(keys))
	for i, k := range keys {
		pair, present := om.pairs[k]
		if !present {
			return nil, &KeyNotFoundError[K]{k}
		}
		elements[i] = pair.element
	}
	return elements, nil
}

// MoveToBack moves the value associated with key to the back of the ordered map,
// i.e. makes it the newest pair in the map.
// Returns an error iff key is not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveToBack(key K) error {
	_, err := om.GetAndMoveToBack(key)
	return err
}

// MoveToFront moves the value associated with key to the front of the ordered map,
// i.e. makes it the oldest pair in the map.
// Returns an error iff key is not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveToFront(key K) error {
	_, err := om.GetAndMoveToFront(key)
	return err
}

// GetAndMoveToBack combines Get and MoveToBack in the same call. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) GetAndMoveToBack(key K) (val V, err error) {
	if pair, present := om.pairs[key]; present {
		val = pair.Value
		om.list.MoveToBack(pair.element)
	} else {
		err = &KeyNotFoundError[K]{key}
	}

	return
}

// GetAndMoveToFront combines Get and MoveToFront in the same call. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) GetAndMoveToFront(key K) (val V, err error) {
	if pair, present := om.pairs[key]; present {
		val = pair.Value
		om.list.MoveToFront(pair.element)
	} else {
		err = &KeyNotFoundError[K]{key}
	}

	return
}

// FromOldest returns an iterator over all the key-value pairs in the map, starting from the oldest pair.
func (om *OrderedMap[K, V]) FromOldest() iter.Seq2[K, V] {
	return func(yield func(K, V) bool) {
		for pair := om.Oldest(); pair != nil; pair = pair.Next() {
			if !yield(pair.Key, pair.Value) {
				return
			}
		}
	}
}

// FromNewest returns an iterator over all the key-value pairs in the map, starting from the newest pair.
func (om *OrderedMap[K, V]) FromNewest() iter.Seq2[K, V] {
	return func(yield func(K, V) bool) {
		for pair := om.Newest(); pair != nil; pair = pair.Prev() {
			if !yield(pair.Key, pair.Value) {
				return
			}
		}
	}
}

// KeysFromOldest returns an iterator over all the keys in the map, starting from the oldest pair.
func (om *OrderedMap[K, V]) KeysFromOldest() iter.Seq[K] {
	return func(yield func(K) bool) {
		for pair := om.Oldest(); pair != nil; pair = pair.Next() {
			if !yield(pair.Key) {
				return
			}
		}
	}
}

// KeysFromNewest returns an iterator over all the keys in the map, starting from the newest pair.
func (om *OrderedMap[K, V]) KeysFromNewest() iter.Seq[K] {
	return func(yield func(K) bool) {
		for pair := om.Newest(); pair != nil; pair = pair.Prev() {
			if !yield(pair.Key) {
				return
			}
		}
	}
}

// ValuesFromOldest returns an iterator over all the values in the map, starting from the oldest pair.
func (om *OrderedMap[K, V]) ValuesFromOldest() iter.Seq[V] {
	return func(yield func(V) bool) {
		for pair := om.Oldest(); pair != nil; pair = pair.Next() {
			if !yield(pair.Value) {
				return
			}
		}
	}
}

// ValuesFromNewest returns an iterator over all the values in the map, starting from the newest pair.
func (om *OrderedMap[K, V]) ValuesFromNewest() iter.Seq[V] {
	return func(yield func(V) bool) {
		for pair := om.Newest(); pair != nil; pair = pair.Prev() {
			if !yield(pair.Value) {
				return
			}
		}
	}
}

// From creates a new OrderedMap from an iterator over key-value pairs.
func From[K comparable, V any](i iter.Seq2[K, V]) *OrderedMap[K, V] {
	oMap := New[K, V]()

	for k, v := range i {
		oMap.Set(k, v)
	}

	return oMap
}
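
The Move* and GetAndMove* reordering methods above aren't covered by the README examples; here is a minimal usage sketch (not part of this diff) exercising them:

```go
package main

import (
	"errors"
	"fmt"

	orderedmap "github.com/wk8/go-ordered-map/v2"
)

func main() {
	om := orderedmap.New[string, int]()
	om.Set("a", 1)
	om.Set("b", 2)
	om.Set("c", 3)

	_ = om.MoveToFront("c")    // order is now c, a, b
	_ = om.MoveAfter("a", "b") // order is now c, b, a

	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		fmt.Printf("%s => %d\n", pair.Key, pair.Value)
	} // prints:
	// c => 3
	// b => 2
	// a => 1

	// moving a missing key yields a *KeyNotFoundError:
	var notFound *orderedmap.KeyNotFoundError[string]
	err := om.MoveToBack("nope")
	fmt.Println(errors.As(err, &notFound)) // => true
}
```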
71 vendor/github.com/wk8/go-ordered-map/v2/yaml.go generated vendored Normal file
@@ -0,0 +1,71 @@
package orderedmap

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

var (
	_ yaml.Marshaler   = &OrderedMap[int, any]{}
	_ yaml.Unmarshaler = &OrderedMap[int, any]{}
)

// MarshalYAML implements the yaml.Marshaler interface.
func (om *OrderedMap[K, V]) MarshalYAML() (interface{}, error) {
	if om == nil {
		return []byte("null"), nil
	}

	node := yaml.Node{
		Kind: yaml.MappingNode,
	}

	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		key, value := pair.Key, pair.Value

		keyNode := &yaml.Node{}

		// serialize key to yaml, then deserialize it back into the node
		// this is a hack to get the correct tag for the key
		if err := keyNode.Encode(key); err != nil {
			return nil, err
		}

		valueNode := &yaml.Node{}
		if err := valueNode.Encode(value); err != nil {
			return nil, err
		}

		node.Content = append(node.Content, keyNode, valueNode)
	}

	return &node, nil
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (om *OrderedMap[K, V]) UnmarshalYAML(value *yaml.Node) error {
	if value.Kind != yaml.MappingNode {
		return fmt.Errorf("pipeline must contain YAML mapping, has %v", value.Kind)
	}

	if om.list == nil {
		om.initialize(0)
	}

	for index := 0; index < len(value.Content); index += 2 {
		var key K
		var val V

		if err := value.Content[index].Decode(&key); err != nil {
			return err
		}
		if err := value.Content[index+1].Decode(&val); err != nil {
			return err
		}

		om.Set(key, val)
	}

	return nil
}
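
As a minimal sketch (not part of this diff) of what the two methods above give you in practice: the mapping node is built by walking pairs oldest-first, so YAML output follows insertion order, which a plain `map[string]int` would not guarantee:

```go
package main

import (
	"fmt"

	orderedmap "github.com/wk8/go-ordered-map/v2"
	"gopkg.in/yaml.v3"
)

func main() {
	om := orderedmap.New[string, int]()
	om.Set("zebra", 1)
	om.Set("apple", 2) // inserted second, stays second in the output

	data, err := yaml.Marshal(om)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data))
	// prints (insertion order, not key order):
	// zebra: 1
	// apple: 2

	om2 := orderedmap.New[string, int]()
	if err := yaml.Unmarshal(data, om2); err != nil {
		panic(err)
	}
	fmt.Println(om2.Oldest().Key) // => zebra
}
```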
26 vendor/modules.txt vendored
@@ -91,6 +91,9 @@ github.com/asaskevich/govalidator
 ## explicit
 github.com/aymerick/douceur/css
 github.com/aymerick/douceur/parser
+# github.com/bahlo/generic-list-go v0.2.0
+## explicit; go 1.18
+github.com/bahlo/generic-list-go
 # github.com/beorn7/perks v1.0.1
 ## explicit; go 1.11
 github.com/beorn7/perks/quantile
@@ -98,6 +101,9 @@ github.com/beorn7/perks/quantile
 ## explicit; go 1.14
 github.com/buckket/go-blurhash
 github.com/buckket/go-blurhash/base83
+# github.com/buger/jsonparser v1.1.1
+## explicit; go 1.13
+github.com/buger/jsonparser
 # github.com/bytedance/sonic v1.11.6
 ## explicit; go 1.16
 github.com/bytedance/sonic
@@ -919,8 +925,8 @@ github.com/ugorji/go/codec
 github.com/ulule/limiter/v3
 github.com/ulule/limiter/v3/drivers/store/common
 github.com/ulule/limiter/v3/drivers/store/memory
-# github.com/uptrace/bun v1.2.5
-## explicit; go 1.22
+# github.com/uptrace/bun v1.2.6
+## explicit; go 1.23
 github.com/uptrace/bun
 github.com/uptrace/bun/dialect
 github.com/uptrace/bun/dialect/feature
@@ -930,15 +936,16 @@ github.com/uptrace/bun/internal
 github.com/uptrace/bun/internal/parser
 github.com/uptrace/bun/internal/tagparser
 github.com/uptrace/bun/migrate
+github.com/uptrace/bun/migrate/sqlschema
 github.com/uptrace/bun/schema
-# github.com/uptrace/bun/dialect/pgdialect v1.2.5
-## explicit; go 1.22
+# github.com/uptrace/bun/dialect/pgdialect v1.2.6
+## explicit; go 1.23
 github.com/uptrace/bun/dialect/pgdialect
-# github.com/uptrace/bun/dialect/sqlitedialect v1.2.5
-## explicit; go 1.22
+# github.com/uptrace/bun/dialect/sqlitedialect v1.2.6
+## explicit; go 1.23
 github.com/uptrace/bun/dialect/sqlitedialect
-# github.com/uptrace/bun/extra/bunotel v1.2.5
-## explicit; go 1.22
+# github.com/uptrace/bun/extra/bunotel v1.2.6
+## explicit; go 1.23
 github.com/uptrace/bun/extra/bunotel
 # github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2
 ## explicit; go 1.22
@@ -955,6 +962,9 @@ github.com/vmihailenco/tagparser/v2/internal/parser
 # github.com/wagslane/go-password-validator v0.3.0
 ## explicit; go 1.16
 github.com/wagslane/go-password-validator
+# github.com/wk8/go-ordered-map/v2 v2.1.9-0.20240816141633-0a40785b4f41
+## explicit; go 1.23
+github.com/wk8/go-ordered-map/v2
 # github.com/yuin/goldmark v1.7.8
 ## explicit; go 1.19
 github.com/yuin/goldmark