bumps uptrace/bun deps to v1.2.8 (#3698)

This commit is contained in:
kim 2025-01-27 15:54:51 +00:00 committed by GitHub
parent 7b7fc528f1
commit 3617e27afa
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
35 changed files with 760 additions and 220 deletions

16
go.mod
View file

@ -78,20 +78,20 @@ require (
github.com/tetratelabs/wazero v1.8.2 github.com/tetratelabs/wazero v1.8.2
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
github.com/ulule/limiter/v3 v3.11.2 github.com/ulule/limiter/v3 v3.11.2
github.com/uptrace/bun v1.2.8 github.com/uptrace/bun v1.2.9
github.com/uptrace/bun/dialect/pgdialect v1.2.8 github.com/uptrace/bun/dialect/pgdialect v1.2.9
github.com/uptrace/bun/dialect/sqlitedialect v1.2.8 github.com/uptrace/bun/dialect/sqlitedialect v1.2.9
github.com/uptrace/bun/extra/bunotel v1.2.8 github.com/uptrace/bun/extra/bunotel v1.2.9
github.com/wagslane/go-password-validator v0.3.0 github.com/wagslane/go-password-validator v0.3.0
github.com/yuin/goldmark v1.7.8 github.com/yuin/goldmark v1.7.8
go.opentelemetry.io/otel v1.33.0 go.opentelemetry.io/otel v1.34.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0
go.opentelemetry.io/otel/exporters/prometheus v0.51.0 go.opentelemetry.io/otel/exporters/prometheus v0.51.0
go.opentelemetry.io/otel/metric v1.33.0 go.opentelemetry.io/otel/metric v1.34.0
go.opentelemetry.io/otel/sdk v1.32.0 go.opentelemetry.io/otel/sdk v1.32.0
go.opentelemetry.io/otel/sdk/metric v1.32.0 go.opentelemetry.io/otel/sdk/metric v1.32.0
go.opentelemetry.io/otel/trace v1.33.0 go.opentelemetry.io/otel/trace v1.34.0
go.uber.org/automaxprocs v1.6.0 go.uber.org/automaxprocs v1.6.0
golang.org/x/crypto v0.32.0 golang.org/x/crypto v0.32.0
golang.org/x/image v0.23.0 golang.org/x/image v0.23.0
@ -202,7 +202,7 @@ require (
github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.59.1 // indirect github.com/prometheus/common v0.59.1 // indirect
github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect
github.com/puzpuzpuz/xsync/v3 v3.4.0 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.0 // indirect
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a // indirect github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a // indirect

20
go.sum generated
View file

@ -471,8 +471,8 @@ github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJ
github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4= github.com/puzpuzpuz/xsync/v3 v3.5.0 h1:i+cMcpEDY1BkNm7lPDkCtE4oElsYLn+EKF8kAu2vXT4=
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/puzpuzpuz/xsync/v3 v3.5.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY99imuIeoh8Vr0GSwAlYxPAhqZrpFc= github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY99imuIeoh8Vr0GSwAlYxPAhqZrpFc=
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTepVu7uJBYgZ0SdWHQlIas582j6cn2jgk4DDdlg= github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTepVu7uJBYgZ0SdWHQlIas582j6cn2jgk4DDdlg=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
@ -586,14 +586,14 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA= github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA=
github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI= github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI=
github.com/uptrace/bun v1.2.8 h1:HEiLvy9wc7ehU5S02+O6NdV5BLz48lL4REPhTkMX3Dg= github.com/uptrace/bun v1.2.9 h1:OOt2DlIcRUMSZPr6iXDFg/LaQd59kOxbAjpIVHddKRs=
github.com/uptrace/bun v1.2.8/go.mod h1:JBq0uBKsKqNT0Ccce1IAFZY337Wkf08c6F6qlmfOHE8= github.com/uptrace/bun v1.2.9/go.mod h1:r2ZaaGs9Ru5bpGTr8GQfp8jp+TlCav9grYCPOu2CJSg=
github.com/uptrace/bun/dialect/pgdialect v1.2.8 h1:9n3qVh6yc+u7F3lpXzsWrAFJG1yLHUC2thjCCVEDpM8= github.com/uptrace/bun/dialect/pgdialect v1.2.9 h1:caf5uFbOGiXvadV6pA5gn87k0awFFxL1kuuY3SpxnWk=
github.com/uptrace/bun/dialect/pgdialect v1.2.8/go.mod h1:plksD43MjAlPGYLD9/SzsLUpGH5poXE9IB1+ka/sEzE= github.com/uptrace/bun/dialect/pgdialect v1.2.9/go.mod h1:m7L9JtOp/Lt8HccET70ULxplMweE/u0S9lNUSxz2duo=
github.com/uptrace/bun/dialect/sqlitedialect v1.2.8 h1:Huqw7YhLFTbocbSv8NETYYXqKtwLa6XsciCWtjzWSWU= github.com/uptrace/bun/dialect/sqlitedialect v1.2.9 h1:HLzGWXBh07sT8zhVPy6veYbbGrAtYq0KzyRHXBj+GjA=
github.com/uptrace/bun/dialect/sqlitedialect v1.2.8/go.mod h1:ni7h2uwIc5zPhxgmCMTEbefONc4XsVr/ATfz1Q7d3CE= github.com/uptrace/bun/dialect/sqlitedialect v1.2.9/go.mod h1:dUR+ecoCWA0FIa9vhQVRnGtYYPpuCLJoEEtX9E1aiBU=
github.com/uptrace/bun/extra/bunotel v1.2.8 h1:mu98xQ2EcmkeNGT+YjVtMludtZNHfhfHqhrS77mk4YM= github.com/uptrace/bun/extra/bunotel v1.2.9 h1:BGGrBga+iVL78SGiMpLt2N9MAKvrG3f8wLk8zCLwFJg=
github.com/uptrace/bun/extra/bunotel v1.2.8/go.mod h1:NSjzSfYdDg0WSiY54pFp4ykGoGUmbc/xYQ7AsdyslHQ= github.com/uptrace/bun/extra/bunotel v1.2.9/go.mod h1:6dVl5Ko6xOhuoqUPWHpfFrntBDwmOnq0OMiR/SGwAC8=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c= github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ= github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=

View file

@ -80,7 +80,14 @@ m.Store(Point{42, 42}, 42)
v, ok := m.Load(point{42, 42}) v, ok := m.Load(point{42, 42})
``` ```
Both maps use the built-in Golang's hash function which has DDOS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. However, for smaller keys this hash function has some overhead. So, if you don't need DDOS protection, you may provide a custom hash function when creating a `MapOf`. For instance, Murmur3 finalizer does a decent job when it comes to integers: Apart from `Range` method available for map iteration, there are also `ToPlainMap`/`ToPlainMapOf` utility functions to convert a `Map`/`MapOf` to a built-in Go's `map`:
```go
m := xsync.NewMapOf[int, int]()
m.Store(42, 42)
pm := xsync.ToPlainMapOf(m)
```
Both `Map` and `MapOf` use the built-in Golang's hash function which has DDOS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. However, for smaller keys this hash function has some overhead. So, if you don't need DDOS protection, you may provide a custom hash function when creating a `MapOf`. For instance, Murmur3 finalizer does a decent job when it comes to integers:
```go ```go
m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 { m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 {
@ -93,28 +100,50 @@ m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 {
When benchmarking concurrent maps, make sure to configure all of the competitors with the same hash function or, at least, take hash function performance into the consideration. When benchmarking concurrent maps, make sure to configure all of the competitors with the same hash function or, at least, take hash function performance into the consideration.
### SPSCQueue
A `SPSCQueue` is a bounded single-producer single-consumer concurrent queue. This means that at most one goroutine may be publishing items to the queue while at most one goroutine may be consuming those items.
```go
q := xsync.NewSPSCQueue(1024)
// producer inserts an item into the queue
// optimistic insertion attempt; doesn't block
inserted := q.TryEnqueue("bar")
// consumer obtains an item from the queue
// optimistic obtain attempt; doesn't block
item, ok := q.TryDequeue() // interface{} pointing to a string
```
`SPSCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later.
```go
q := xsync.NewSPSCQueueOf[string](1024)
inserted := q.TryEnqueue("foo")
item, ok := q.TryDequeue() // string
```
The queue is based on the data structure from this [article](https://rigtorp.se/ringbuffer). The idea is to reduce the CPU cache coherency traffic by keeping cached copies of read and write indexes used by producer and consumer respectively.
### MPMCQueue ### MPMCQueue
A `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue. A `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue.
```go ```go
q := xsync.NewMPMCQueue(1024) q := xsync.NewMPMCQueue(1024)
// producer inserts an item into the queue // producer optimistically inserts an item into the queue
q.Enqueue("foo")
// optimistic insertion attempt; doesn't block // optimistic insertion attempt; doesn't block
inserted := q.TryEnqueue("bar") inserted := q.TryEnqueue("bar")
// consumer obtains an item from the queue // consumer obtains an item from the queue
item := q.Dequeue() // interface{} pointing to a string
// optimistic obtain attempt; doesn't block // optimistic obtain attempt; doesn't block
item, ok := q.TryDequeue() item, ok := q.TryDequeue() // interface{} pointing to a string
``` ```
`MPMCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later. `MPMCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later.
```go ```go
q := xsync.NewMPMCQueueOf[string](1024) q := xsync.NewMPMCQueueOf[string](1024)
q.Enqueue("foo") inserted := q.TryEnqueue("foo")
item := q.Dequeue() // string item, ok := q.TryDequeue() // string
``` ```
The queue is based on the algorithm from the [MPMCQueue](https://github.com/rigtorp/MPMCQueue) C++ library which in its turn references D.Vyukov's [MPMC queue](https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue). According to the following [classification](https://www.1024cores.net/home/lock-free-algorithms/queues), the queue is array-based, fails on overflow, provides causal FIFO, has blocking producers and consumers. The queue is based on the algorithm from the [MPMCQueue](https://github.com/rigtorp/MPMCQueue) C++ library which in its turn references D.Vyukov's [MPMC queue](https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue). According to the following [classification](https://www.1024cores.net/home/lock-free-algorithms/queues), the queue is array-based, fails on overflow, provides causal FIFO, has blocking producers and consumers.

View file

@ -200,6 +200,21 @@ func newMapTable(minTableLen int) *mapTable {
return t return t
} }
// ToPlainMap returns a native map with a copy of xsync Map's
// contents. The copied xsync Map should not be modified while
// this call is made. If the copied Map is modified, the copying
// behavior is the same as in the Range method.
func ToPlainMap(m *Map) map[string]interface{} {
	result := make(map[string]interface{})
	if m == nil {
		// A nil source yields an empty (non-nil) native map.
		return result
	}
	m.Range(func(k string, v interface{}) bool {
		result[k] = v
		return true // keep iterating over all entries
	})
	return result
}
// Load returns the value stored in the map for a key, or nil if no // Load returns the value stored in the map for a key, or nil if no
// value is present. // value is present.
// The ok result indicates whether value was found in the map. // The ok result indicates whether value was found in the map.
@ -279,6 +294,34 @@ func(interface{}, bool) (interface{}, bool) {
) )
} }
// LoadOrTryCompute returns the existing value for the key if present.
// Otherwise, it tries to compute the value using the provided function
// and, on success, returns the computed value. The loaded result is true
// if the value was loaded, false if stored. If the compute attempt was
// cancelled, a nil will be returned.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *Map) LoadOrTryCompute(
	key string,
	valueFn func() (newValue interface{}, cancel bool),
) (value interface{}, loaded bool) {
	return m.doCompute(
		key,
		func(interface{}, bool) (interface{}, bool) {
			newValue, cancel := valueFn()
			if cancel {
				// Cancelled: signal deletion so nothing is stored.
				return nil, true
			}
			return newValue, false
		},
		true,
		false,
	)
}
// LoadOrCompute returns the existing value for the key if present. // LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function and // Otherwise, it computes the value using the provided function and
// returns the computed value. The loaded result is true if the value // returns the computed value. The loaded result is true if the value
@ -447,11 +490,11 @@ func (m *Map) doCompute(
if b.next == nil { if b.next == nil {
if emptyb != nil { if emptyb != nil {
// Insertion into an existing bucket. // Insertion into an existing bucket.
var zeroedV interface{} var zeroV interface{}
newValue, del := valueFn(zeroedV, false) newValue, del := valueFn(zeroV, false)
if del { if del {
unlockBucket(&rootb.topHashMutex) unlockBucket(&rootb.topHashMutex)
return zeroedV, false return zeroV, false
} }
// First we update the value, then the key. // First we update the value, then the key.
// This is important for atomic snapshot states. // This is important for atomic snapshot states.
@ -471,8 +514,8 @@ func (m *Map) doCompute(
goto compute_attempt goto compute_attempt
} }
// Insertion into a new bucket. // Insertion into a new bucket.
var zeroedV interface{} var zeroV interface{}
newValue, del := valueFn(zeroedV, false) newValue, del := valueFn(zeroV, false)
if del { if del {
unlockBucket(&rootb.topHashMutex) unlockBucket(&rootb.topHashMutex)
return newValue, false return newValue, false

View file

@ -149,6 +149,21 @@ func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] {
return t return t
} }
// ToPlainMapOf returns a native map with a copy of xsync Map's
// contents. The copied xsync Map should not be modified while
// this call is made. If the copied Map is modified, the copying
// behavior is the same as in the Range method.
func ToPlainMapOf[K comparable, V any](m *MapOf[K, V]) map[K]V {
	result := make(map[K]V)
	if m == nil {
		// A nil source yields an empty (non-nil) native map.
		return result
	}
	m.Range(func(k K, v V) bool {
		result[k] = v
		return true // keep iterating over all entries
	})
	return result
}
// Load returns the value stored in the map for a key, or zero value // Load returns the value stored in the map for a key, or zero value
// of type V if no value is present. // of type V if no value is present.
// The ok result indicates whether value was found in the map. // The ok result indicates whether value was found in the map.
@ -243,6 +258,34 @@ func(V, bool) (V, bool) {
) )
} }
// LoadOrTryCompute returns the existing value for the key if present.
// Otherwise, it tries to compute the value using the provided function
// and, on success, returns the computed value. The loaded result is true
// if the value was loaded, false if stored. If the compute attempt was
// cancelled, a zero value of type V will be returned.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *MapOf[K, V]) LoadOrTryCompute(
	key K,
	valueFn func() (newValue V, cancel bool),
) (value V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			newValue, cancel := valueFn()
			if cancel {
				// Cancelled: signal deletion; the value is ignored.
				return newValue, true
			}
			return newValue, false
		},
		true,
		false,
	)
}
// Compute either sets the computed new value for the key or deletes // Compute either sets the computed new value for the key or deletes
// the value for the key. When the delete result of the valueFn function // the value for the key. When the delete result of the valueFn function
// is set to true, the value will be deleted, if it exists. When delete // is set to true, the value will be deleted, if it exists. When delete
@ -390,11 +433,11 @@ func (m *MapOf[K, V]) doCompute(
if b.next == nil { if b.next == nil {
if emptyb != nil { if emptyb != nil {
// Insertion into an existing bucket. // Insertion into an existing bucket.
var zeroedV V var zeroV V
newValue, del := valueFn(zeroedV, false) newValue, del := valueFn(zeroV, false)
if del { if del {
rootb.mu.Unlock() rootb.mu.Unlock()
return zeroedV, false return zeroV, false
} }
newe := new(entryOf[K, V]) newe := new(entryOf[K, V])
newe.key = key newe.key = key
@ -414,8 +457,8 @@ func (m *MapOf[K, V]) doCompute(
goto compute_attempt goto compute_attempt
} }
// Insertion into a new bucket. // Insertion into a new bucket.
var zeroedV V var zeroV V
newValue, del := valueFn(zeroedV, false) newValue, del := valueFn(zeroV, false)
if del { if del {
rootb.mu.Unlock() rootb.mu.Unlock()
return newValue, false return newValue, false

View file

@ -50,6 +50,8 @@ func NewMPMCQueue(capacity int) *MPMCQueue {
// Enqueue inserts the given item into the queue. // Enqueue inserts the given item into the queue.
// Blocks, if the queue is full. // Blocks, if the queue is full.
//
// Deprecated: use TryEnqueue in combination with runtime.Gosched().
func (q *MPMCQueue) Enqueue(item interface{}) { func (q *MPMCQueue) Enqueue(item interface{}) {
head := atomic.AddUint64(&q.head, 1) - 1 head := atomic.AddUint64(&q.head, 1) - 1
slot := &q.slots[q.idx(head)] slot := &q.slots[q.idx(head)]
@ -63,6 +65,8 @@ func (q *MPMCQueue) Enqueue(item interface{}) {
// Dequeue retrieves and removes the item from the head of the queue. // Dequeue retrieves and removes the item from the head of the queue.
// Blocks, if the queue is empty. // Blocks, if the queue is empty.
//
// Deprecated: use TryDequeue in combination with runtime.Gosched().
func (q *MPMCQueue) Dequeue() interface{} { func (q *MPMCQueue) Dequeue() interface{} {
tail := atomic.AddUint64(&q.tail, 1) - 1 tail := atomic.AddUint64(&q.tail, 1) - 1
slot := &q.slots[q.idx(tail)] slot := &q.slots[q.idx(tail)]
@ -81,24 +85,16 @@ func (q *MPMCQueue) Dequeue() interface{} {
// full and the item was inserted. // full and the item was inserted.
func (q *MPMCQueue) TryEnqueue(item interface{}) bool { func (q *MPMCQueue) TryEnqueue(item interface{}) bool {
head := atomic.LoadUint64(&q.head) head := atomic.LoadUint64(&q.head)
for { slot := &q.slots[q.idx(head)]
slot := &q.slots[q.idx(head)] turn := q.turn(head) * 2
turn := q.turn(head) * 2 if atomic.LoadUint64(&slot.turn) == turn {
if atomic.LoadUint64(&slot.turn) == turn { if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
if atomic.CompareAndSwapUint64(&q.head, head, head+1) { slot.item = item
slot.item = item atomic.StoreUint64(&slot.turn, turn+1)
atomic.StoreUint64(&slot.turn, turn+1) return true
return true
}
} else {
prevHead := head
head = atomic.LoadUint64(&q.head)
if head == prevHead {
return false
}
} }
runtime.Gosched()
} }
return false
} }
// TryDequeue retrieves and removes the item from the head of the // TryDequeue retrieves and removes the item from the head of the
@ -106,26 +102,18 @@ func (q *MPMCQueue) TryEnqueue(item interface{}) bool {
// indicates that the queue isn't empty and an item was retrieved. // indicates that the queue isn't empty and an item was retrieved.
func (q *MPMCQueue) TryDequeue() (item interface{}, ok bool) { func (q *MPMCQueue) TryDequeue() (item interface{}, ok bool) {
tail := atomic.LoadUint64(&q.tail) tail := atomic.LoadUint64(&q.tail)
for { slot := &q.slots[q.idx(tail)]
slot := &q.slots[q.idx(tail)] turn := q.turn(tail)*2 + 1
turn := q.turn(tail)*2 + 1 if atomic.LoadUint64(&slot.turn) == turn {
if atomic.LoadUint64(&slot.turn) == turn { if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) { item = slot.item
item = slot.item ok = true
ok = true slot.item = nil
slot.item = nil atomic.StoreUint64(&slot.turn, turn+1)
atomic.StoreUint64(&slot.turn, turn+1) return
return
}
} else {
prevTail := tail
tail = atomic.LoadUint64(&q.tail)
if tail == prevTail {
return
}
} }
runtime.Gosched()
} }
return
} }
func (q *MPMCQueue) idx(i uint64) uint64 { func (q *MPMCQueue) idx(i uint64) uint64 {

View file

@ -12,7 +12,7 @@
// A MPMCQueueOf is a bounded multi-producer multi-consumer concurrent // A MPMCQueueOf is a bounded multi-producer multi-consumer concurrent
// queue. It's a generic version of MPMCQueue. // queue. It's a generic version of MPMCQueue.
// //
// MPMCQueue instances must be created with NewMPMCQueueOf function. // MPMCQueueOf instances must be created with NewMPMCQueueOf function.
// A MPMCQueueOf must not be copied after first use. // A MPMCQueueOf must not be copied after first use.
// //
// Based on the data structure from the following C++ library: // Based on the data structure from the following C++ library:
@ -61,6 +61,8 @@ func NewMPMCQueueOf[I any](capacity int) *MPMCQueueOf[I] {
// Enqueue inserts the given item into the queue. // Enqueue inserts the given item into the queue.
// Blocks, if the queue is full. // Blocks, if the queue is full.
//
// Deprecated: use TryEnqueue in combination with runtime.Gosched().
func (q *MPMCQueueOf[I]) Enqueue(item I) { func (q *MPMCQueueOf[I]) Enqueue(item I) {
head := atomic.AddUint64(&q.head, 1) - 1 head := atomic.AddUint64(&q.head, 1) - 1
slot := &q.slots[q.idx(head)] slot := &q.slots[q.idx(head)]
@ -74,8 +76,10 @@ func (q *MPMCQueueOf[I]) Enqueue(item I) {
// Dequeue retrieves and removes the item from the head of the queue. // Dequeue retrieves and removes the item from the head of the queue.
// Blocks, if the queue is empty. // Blocks, if the queue is empty.
//
// Deprecated: use TryDequeue in combination with runtime.Gosched().
func (q *MPMCQueueOf[I]) Dequeue() I { func (q *MPMCQueueOf[I]) Dequeue() I {
var zeroedI I var zeroI I
tail := atomic.AddUint64(&q.tail, 1) - 1 tail := atomic.AddUint64(&q.tail, 1) - 1
slot := &q.slots[q.idx(tail)] slot := &q.slots[q.idx(tail)]
turn := q.turn(tail)*2 + 1 turn := q.turn(tail)*2 + 1
@ -83,7 +87,7 @@ func (q *MPMCQueueOf[I]) Dequeue() I {
runtime.Gosched() runtime.Gosched()
} }
item := slot.item item := slot.item
slot.item = zeroedI slot.item = zeroI
slot.turn.Store(turn + 1) slot.turn.Store(turn + 1)
return item return item
} }
@ -93,24 +97,16 @@ func (q *MPMCQueueOf[I]) Dequeue() I {
// full and the item was inserted. // full and the item was inserted.
func (q *MPMCQueueOf[I]) TryEnqueue(item I) bool { func (q *MPMCQueueOf[I]) TryEnqueue(item I) bool {
head := atomic.LoadUint64(&q.head) head := atomic.LoadUint64(&q.head)
for { slot := &q.slots[q.idx(head)]
slot := &q.slots[q.idx(head)] turn := q.turn(head) * 2
turn := q.turn(head) * 2 if slot.turn.Load() == turn {
if slot.turn.Load() == turn { if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
if atomic.CompareAndSwapUint64(&q.head, head, head+1) { slot.item = item
slot.item = item slot.turn.Store(turn + 1)
slot.turn.Store(turn + 1) return true
return true
}
} else {
prevHead := head
head = atomic.LoadUint64(&q.head)
if head == prevHead {
return false
}
} }
runtime.Gosched()
} }
return false
} }
// TryDequeue retrieves and removes the item from the head of the // TryDequeue retrieves and removes the item from the head of the
@ -118,27 +114,19 @@ func (q *MPMCQueueOf[I]) TryEnqueue(item I) bool {
// indicates that the queue isn't empty and an item was retrieved. // indicates that the queue isn't empty and an item was retrieved.
func (q *MPMCQueueOf[I]) TryDequeue() (item I, ok bool) { func (q *MPMCQueueOf[I]) TryDequeue() (item I, ok bool) {
tail := atomic.LoadUint64(&q.tail) tail := atomic.LoadUint64(&q.tail)
for { slot := &q.slots[q.idx(tail)]
slot := &q.slots[q.idx(tail)] turn := q.turn(tail)*2 + 1
turn := q.turn(tail)*2 + 1 if slot.turn.Load() == turn {
if slot.turn.Load() == turn { if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) { var zeroI I
var zeroedI I item = slot.item
item = slot.item ok = true
ok = true slot.item = zeroI
slot.item = zeroedI slot.turn.Store(turn + 1)
slot.turn.Store(turn + 1) return
return
}
} else {
prevTail := tail
tail = atomic.LoadUint64(&q.tail)
if tail == prevTail {
return
}
} }
runtime.Gosched()
} }
return
} }
func (q *MPMCQueueOf[I]) idx(i uint64) uint64 { func (q *MPMCQueueOf[I]) idx(i uint64) uint64 {

92
vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go generated vendored Normal file
View file

@ -0,0 +1,92 @@
package xsync
import (
"sync/atomic"
)
// A SPSCQueue is a bounded single-producer single-consumer concurrent
// queue. This means that not more than a single goroutine must be
// publishing items to the queue while not more than a single goroutine
// must be consuming those items.
//
// SPSCQueue instances must be created with NewSPSCQueue function.
// A SPSCQueue must not be copied after first use.
//
// Based on the data structure from the following article:
// https://rigtorp.se/ringbuffer/
type SPSCQueue struct {
	// cap is the ring buffer length: the requested capacity plus one
	// slot that is always kept empty, so a full queue can be told
	// apart from an empty one by comparing indices.
	cap uint64
	// pidx is the producer (write) index; advanced only by TryEnqueue.
	pidx uint64
	//lint:ignore U1000 prevents false sharing
	pad0 [cacheLineSize - 8]byte
	// pcachedIdx is the consumer's cached copy of pidx; refreshed from
	// the shared pidx only when the queue looks empty (see TryDequeue).
	pcachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad1 [cacheLineSize - 8]byte
	// cidx is the consumer (read) index; advanced only by TryDequeue.
	cidx uint64
	//lint:ignore U1000 prevents false sharing
	pad2 [cacheLineSize - 8]byte
	// ccachedIdx is the producer's cached copy of cidx; refreshed from
	// the shared cidx only when the queue looks full (see TryEnqueue).
	ccachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad3 [cacheLineSize - 8]byte
	// items is the ring buffer storage (length cap).
	items []interface{}
}
// NewSPSCQueue creates a new SPSCQueue instance with the given
// capacity. Panics if capacity is less than one.
func NewSPSCQueue(capacity int) *SPSCQueue {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	// One extra slot is allocated so that a full ring can be
	// distinguished from an empty one.
	q := &SPSCQueue{}
	q.cap = uint64(capacity + 1)
	q.items = make([]interface{}, capacity+1)
	return q
}
// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *SPSCQueue) TryEnqueue(item interface{}) bool {
	// relaxed memory order would be enough here
	writeIdx := atomic.LoadUint64(&q.pidx)
	next := writeIdx + 1
	if next == q.cap {
		next = 0 // wrap around the ring
	}
	if next == q.ccachedIdx {
		// Looks full per the cached consumer index; refresh the
		// cache from the shared index and re-check.
		q.ccachedIdx = atomic.LoadUint64(&q.cidx)
		if next == q.ccachedIdx {
			return false
		}
	}
	// Publish the item first, then advance the producer index.
	q.items[writeIdx] = item
	atomic.StoreUint64(&q.pidx, next)
	return true
}
// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *SPSCQueue) TryDequeue() (item interface{}, ok bool) {
	// relaxed memory order would be enough here
	readIdx := atomic.LoadUint64(&q.cidx)
	if readIdx == q.pcachedIdx {
		// Looks empty per the cached producer index; refresh the
		// cache from the shared index and re-check.
		q.pcachedIdx = atomic.LoadUint64(&q.pidx)
		if readIdx == q.pcachedIdx {
			return nil, false
		}
	}
	item = q.items[readIdx]
	// Clear the slot so the queue doesn't retain the item.
	q.items[readIdx] = nil
	ok = true
	next := readIdx + 1
	if next == q.cap {
		next = 0 // wrap around the ring
	}
	atomic.StoreUint64(&q.cidx, next)
	return item, true
}

96
vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go generated vendored Normal file
View file

@ -0,0 +1,96 @@
//go:build go1.19
// +build go1.19
package xsync
import (
"sync/atomic"
)
// A SPSCQueueOf is a bounded single-producer single-consumer concurrent
// queue. This means that not more than a single goroutine must be
// publishing items to the queue while not more than a single goroutine
// must be consuming those items.
//
// SPSCQueueOf instances must be created with NewSPSCQueueOf function.
// A SPSCQueueOf must not be copied after first use.
//
// Based on the data structure from the following article:
// https://rigtorp.se/ringbuffer/
type SPSCQueueOf[I any] struct {
	// cap is the ring buffer length: the requested capacity plus one
	// slot that is always kept empty, so a full queue can be told
	// apart from an empty one by comparing indices.
	cap uint64
	// pidx is the producer (write) index; advanced only by TryEnqueue.
	pidx uint64
	//lint:ignore U1000 prevents false sharing
	pad0 [cacheLineSize - 8]byte
	// pcachedIdx is the consumer's cached copy of pidx; refreshed from
	// the shared pidx only when the queue looks empty (see TryDequeue).
	pcachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad1 [cacheLineSize - 8]byte
	// cidx is the consumer (read) index; advanced only by TryDequeue.
	cidx uint64
	//lint:ignore U1000 prevents false sharing
	pad2 [cacheLineSize - 8]byte
	// ccachedIdx is the producer's cached copy of cidx; refreshed from
	// the shared cidx only when the queue looks full (see TryEnqueue).
	ccachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad3 [cacheLineSize - 8]byte
	// items is the ring buffer storage (length cap).
	items []I
}
// NewSPSCQueueOf creates a new SPSCQueueOf instance with the given
// capacity. Panics if capacity is less than one.
func NewSPSCQueueOf[I any](capacity int) *SPSCQueueOf[I] {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	// One extra slot is allocated so that a full ring can be
	// distinguished from an empty one.
	q := &SPSCQueueOf[I]{}
	q.cap = uint64(capacity + 1)
	q.items = make([]I, capacity+1)
	return q
}
// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *SPSCQueueOf[I]) TryEnqueue(item I) bool {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.pidx)
	// MixedCaps local names (was next_idx/cached_idx) for Go naming
	// convention and consistency with the non-generic SPSCQueue.
	nextIdx := idx + 1
	if nextIdx == q.cap {
		nextIdx = 0 // wrap around the ring
	}
	cachedIdx := q.ccachedIdx
	if nextIdx == cachedIdx {
		// Looks full per the cached consumer index; refresh the
		// cache from the shared index and re-check.
		cachedIdx = atomic.LoadUint64(&q.cidx)
		q.ccachedIdx = cachedIdx
		if nextIdx == cachedIdx {
			return false
		}
	}
	// Publish the item first, then advance the producer index.
	q.items[idx] = item
	atomic.StoreUint64(&q.pidx, nextIdx)
	return true
}
// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *SPSCQueueOf[I]) TryDequeue() (item I, ok bool) {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.cidx)
	// MixedCaps local names (was next_idx/cached_idx) for Go naming
	// convention and consistency with the non-generic SPSCQueue.
	cachedIdx := q.pcachedIdx
	if idx == cachedIdx {
		// Looks empty per the cached producer index; refresh the
		// cache from the shared index and re-check.
		cachedIdx = atomic.LoadUint64(&q.pidx)
		q.pcachedIdx = cachedIdx
		if idx == cachedIdx {
			return
		}
	}
	var zeroI I
	item = q.items[idx]
	// Clear the slot so the queue doesn't retain the item.
	q.items[idx] = zeroI
	ok = true
	nextIdx := idx + 1
	if nextIdx == q.cap {
		nextIdx = 0 // wrap around the ring
	}
	atomic.StoreUint64(&q.cidx, nextIdx)
	return
}

View file

@ -1,3 +1,26 @@
## [1.2.9](https://github.com/uptrace/bun/compare/v1.2.8...v1.2.9) (2025-01-26)
### Bug Fixes
* apply join condition to select with count ([e77b9e7](https://github.com/uptrace/bun/commit/e77b9e72fa5ae8e173d506a4e154ba64214c4aff)), closes [#597](https://github.com/uptrace/bun/issues/597)
* build ([702e525](https://github.com/uptrace/bun/commit/702e525e30ec93b6d4611359518e1008b67744af))
* individual replica timeout ([9f5e8b1](https://github.com/uptrace/bun/commit/9f5e8b1c46673bd1779bd4309a28db33dcd695bf))
* test ([dfc4059](https://github.com/uptrace/bun/commit/dfc405901907419d043bb6ced3ad20c131c1b972))
### Features
* add feature flag AlterColumnExists ([fc35e12](https://github.com/uptrace/bun/commit/fc35e1222242b3d581f0b7496a9021aadfc50b07)), closes [#704](https://github.com/uptrace/bun/issues/704)
* add Options ([815e11a](https://github.com/uptrace/bun/commit/815e11a023d2babf65d528a20ddffc7628636e7e))
* allow to specify read-only replica for SELECTs ([cbbe1e9](https://github.com/uptrace/bun/commit/cbbe1e94fd0c72d1870395a663c8053d7e8c6ace))
* downgrade to use the field in has-many-relation ([91e0d27](https://github.com/uptrace/bun/commit/91e0d2719a5a20b3208cea0232e2dbcb452d6c23)), closes [#1107](https://github.com/uptrace/bun/issues/1107)
* make WithReadOnlyReplica variadic ([4cbb15a](https://github.com/uptrace/bun/commit/4cbb15a53e566e03284253aa46be372338968954))
* **pgdialect:** allow to convert uint to int ([7d22ddd](https://github.com/uptrace/bun/commit/7d22ddd263b28b9fd6e172e0208c124b7c56f111))
* **pgdriver:** improve otel instrumentation ([c40e4f3](https://github.com/uptrace/bun/commit/c40e4f3c50c710903236dc89b56a843a0351a21a))
## [1.2.8](https://github.com/uptrace/bun/compare/v1.2.7...v1.2.8) (2025-01-06) ## [1.2.8](https://github.com/uptrace/bun/compare/v1.2.7...v1.2.8) (2025-01-06)

170
vendor/github.com/uptrace/bun/db.go generated vendored
View file

@ -9,6 +9,7 @@
"reflect" "reflect"
"strings" "strings"
"sync/atomic" "sync/atomic"
"time"
"github.com/uptrace/bun/dialect/feature" "github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal" "github.com/uptrace/bun/internal"
@ -26,32 +27,56 @@ type DBStats struct {
type DBOption func(db *DB) type DBOption func(db *DB)
func WithOptions(opts ...DBOption) DBOption {
return func(db *DB) {
for _, opt := range opts {
opt(db)
}
}
}
func WithDiscardUnknownColumns() DBOption { func WithDiscardUnknownColumns() DBOption {
return func(db *DB) { return func(db *DB) {
db.flags = db.flags.Set(discardUnknownColumns) db.flags = db.flags.Set(discardUnknownColumns)
} }
} }
type DB struct { func WithConnResolver(resolver ConnResolver) DBOption {
*sql.DB return func(db *DB) {
db.resolver = resolver
}
}
dialect schema.Dialect type DB struct {
// Must be a pointer so we copy the whole state, not individual fields.
*noCopyState
queryHooks []QueryHook queryHooks []QueryHook
fmter schema.Formatter fmter schema.Formatter
flags internal.Flag
stats DBStats stats DBStats
} }
// noCopyState contains DB fields that must not be copied on clone(),
// for example, it is forbidden to copy atomic.Pointer.
type noCopyState struct {
*sql.DB
dialect schema.Dialect
resolver ConnResolver
flags internal.Flag
closed atomic.Bool
}
func NewDB(sqldb *sql.DB, dialect schema.Dialect, opts ...DBOption) *DB { func NewDB(sqldb *sql.DB, dialect schema.Dialect, opts ...DBOption) *DB {
dialect.Init(sqldb) dialect.Init(sqldb)
db := &DB{ db := &DB{
DB: sqldb, noCopyState: &noCopyState{
dialect: dialect, DB: sqldb,
fmter: schema.NewFormatter(dialect), dialect: dialect,
},
fmter: schema.NewFormatter(dialect),
} }
for _, opt := range opts { for _, opt := range opts {
@ -69,6 +94,22 @@ func (db *DB) String() string {
return b.String() return b.String()
} }
func (db *DB) Close() error {
if db.closed.Swap(true) {
return nil
}
firstErr := db.DB.Close()
if db.resolver != nil {
if err := db.resolver.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
func (db *DB) DBStats() DBStats { func (db *DB) DBStats() DBStats {
return DBStats{ return DBStats{
Queries: atomic.LoadUint32(&db.stats.Queries), Queries: atomic.LoadUint32(&db.stats.Queries),
@ -703,3 +744,116 @@ func (tx Tx) NewDropColumn() *DropColumnQuery {
func (db *DB) makeQueryBytes() []byte { func (db *DB) makeQueryBytes() []byte {
return internal.MakeQueryBytes() return internal.MakeQueryBytes()
} }
//------------------------------------------------------------------------------
// ConnResolver enables routing queries to multiple databases.
type ConnResolver interface {
ResolveConn(query Query) IConn
Close() error
}
// TODO:
// - make monitoring interval configurable
// - make ping timeout configurable
// - allow adding read/write replicas for multi-master replication
type ReadWriteConnResolver struct {
replicas []*sql.DB // read-only replicas
healthyReplicas atomic.Pointer[[]*sql.DB]
nextReplica atomic.Int64
closed atomic.Bool
}
func NewReadWriteConnResolver(opts ...ReadWriteConnResolverOption) *ReadWriteConnResolver {
r := new(ReadWriteConnResolver)
for _, opt := range opts {
opt(r)
}
if len(r.replicas) > 0 {
r.healthyReplicas.Store(&r.replicas)
go r.monitor()
}
return r
}
type ReadWriteConnResolverOption func(r *ReadWriteConnResolver)
func WithReadOnlyReplica(dbs ...*sql.DB) ReadWriteConnResolverOption {
return func(r *ReadWriteConnResolver) {
r.replicas = append(r.replicas, dbs...)
}
}
func (r *ReadWriteConnResolver) Close() error {
if r.closed.Swap(true) {
return nil
}
var firstErr error
for _, db := range r.replicas {
if err := db.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
// healthyReplica returns a random healthy replica.
func (r *ReadWriteConnResolver) ResolveConn(query Query) IConn {
if len(r.replicas) == 0 || !isReadOnlyQuery(query) {
return nil
}
replicas := r.loadHealthyReplicas()
if len(replicas) == 0 {
return nil
}
if len(replicas) == 1 {
return replicas[0]
}
i := r.nextReplica.Add(1)
return replicas[int(i)%len(replicas)]
}
func isReadOnlyQuery(query Query) bool {
sel, ok := query.(*SelectQuery)
if !ok {
return false
}
for _, el := range sel.with {
if !isReadOnlyQuery(el.query) {
return false
}
}
return true
}
func (r *ReadWriteConnResolver) loadHealthyReplicas() []*sql.DB {
if ptr := r.healthyReplicas.Load(); ptr != nil {
return *ptr
}
return nil
}
func (r *ReadWriteConnResolver) monitor() {
const interval = 5 * time.Second
for !r.closed.Load() {
healthy := make([]*sql.DB, 0, len(r.replicas))
for _, replica := range r.replicas {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
err := replica.PingContext(ctx)
cancel()
if err == nil {
healthy = append(healthy, replica)
}
}
r.healthyReplicas.Store(&healthy)
time.Sleep(interval)
}
}

View file

@ -1,6 +1,11 @@
package feature package feature
import "github.com/uptrace/bun/internal" import (
"fmt"
"strconv"
"github.com/uptrace/bun/internal"
)
type Feature = internal.Flag type Feature = internal.Flag
@ -35,4 +40,55 @@
UpdateOrderLimit // UPDATE ... ORDER BY ... LIMIT ... UpdateOrderLimit // UPDATE ... ORDER BY ... LIMIT ...
DeleteOrderLimit // DELETE ... ORDER BY ... LIMIT ... DeleteOrderLimit // DELETE ... ORDER BY ... LIMIT ...
DeleteReturning DeleteReturning
AlterColumnExists // ADD/DROP COLUMN IF NOT EXISTS/IF EXISTS
) )
type NotSupportError struct {
Flag Feature
}
func (err *NotSupportError) Error() string {
name, ok := flag2str[err.Flag]
if !ok {
name = strconv.FormatInt(int64(err.Flag), 10)
}
return fmt.Sprintf("bun: feature %s is not supported by current dialect", name)
}
func NewNotSupportError(flag Feature) *NotSupportError {
return &NotSupportError{Flag: flag}
}
var flag2str = map[Feature]string{
CTE: "CTE",
WithValues: "WithValues",
Returning: "Returning",
InsertReturning: "InsertReturning",
Output: "Output",
DefaultPlaceholder: "DefaultPlaceholder",
DoubleColonCast: "DoubleColonCast",
ValuesRow: "ValuesRow",
UpdateMultiTable: "UpdateMultiTable",
InsertTableAlias: "InsertTableAlias",
UpdateTableAlias: "UpdateTableAlias",
DeleteTableAlias: "DeleteTableAlias",
AutoIncrement: "AutoIncrement",
Identity: "Identity",
TableCascade: "TableCascade",
TableIdentity: "TableIdentity",
TableTruncate: "TableTruncate",
InsertOnConflict: "InsertOnConflict",
InsertOnDuplicateKey: "InsertOnDuplicateKey",
InsertIgnore: "InsertIgnore",
TableNotExists: "TableNotExists",
OffsetFetch: "OffsetFetch",
SelectExists: "SelectExists",
UpdateFromTable: "UpdateFromTable",
MSSavepoint: "MSSavepoint",
GeneratedIdentity: "GeneratedIdentity",
CompositeIn: "CompositeIn",
UpdateOrderLimit: "UpdateOrderLimit",
DeleteOrderLimit: "DeleteOrderLimit",
DeleteReturning: "DeleteReturning",
AlterColumnExists: "AlterColumnExists",
}

View file

@ -3,6 +3,7 @@
import ( import (
"database/sql" "database/sql"
"fmt" "fmt"
"strconv"
"strings" "strings"
"github.com/uptrace/bun" "github.com/uptrace/bun"
@ -25,8 +26,9 @@ func init() {
type Dialect struct { type Dialect struct {
schema.BaseDialect schema.BaseDialect
tables *schema.Tables tables *schema.Tables
features feature.Feature features feature.Feature
uintAsInt bool
} }
var _ schema.Dialect = (*Dialect)(nil) var _ schema.Dialect = (*Dialect)(nil)
@ -53,7 +55,8 @@ func New(opts ...DialectOption) *Dialect {
feature.SelectExists | feature.SelectExists |
feature.GeneratedIdentity | feature.GeneratedIdentity |
feature.CompositeIn | feature.CompositeIn |
feature.DeleteReturning feature.DeleteReturning |
feature.AlterColumnExists
for _, opt := range opts { for _, opt := range opts {
opt(d) opt(d)
@ -70,6 +73,12 @@ func WithoutFeature(other feature.Feature) DialectOption {
} }
} }
func WithAppendUintAsInt(on bool) DialectOption {
return func(d *Dialect) {
d.uintAsInt = on
}
}
func (d *Dialect) Init(*sql.DB) {} func (d *Dialect) Init(*sql.DB) {}
func (d *Dialect) Name() dialect.Name { func (d *Dialect) Name() dialect.Name {
@ -127,6 +136,20 @@ func (d *Dialect) IdentQuote() byte {
return '"' return '"'
} }
func (d *Dialect) AppendUint32(b []byte, n uint32) []byte {
if d.uintAsInt {
return strconv.AppendInt(b, int64(int32(n)), 10)
}
return strconv.AppendUint(b, uint64(n), 10)
}
func (d *Dialect) AppendUint64(b []byte, n uint64) []byte {
if d.uintAsInt {
return strconv.AppendInt(b, int64(n), 10)
}
return strconv.AppendUint(b, n, 10)
}
func (d *Dialect) AppendSequence(b []byte, _ *schema.Table, _ *schema.Field) []byte { func (d *Dialect) AppendSequence(b []byte, _ *schema.Table, _ *schema.Field) []byte {
return appendGeneratedAsIdentity(b) return appendGeneratedAsIdentity(b)
} }

View file

@ -2,5 +2,5 @@
// Version is the current release version. // Version is the current release version.
func Version() string { func Version() string {
return "1.2.8" return "1.2.9"
} }

View file

@ -2,5 +2,5 @@
// Version is the current release version. // Version is the current release version.
func Version() string { func Version() string {
return "1.2.8" return "1.2.9"
} }

View file

@ -94,7 +94,7 @@ func (m *hasManyModel) Scan(src interface{}) error {
for i, f := range m.rel.JoinPKs { for i, f := range m.rel.JoinPKs {
if f.Name == column { if f.Name == column {
m.structKey[i] = indirectFieldValue(field.Value(m.strct)) m.structKey[i] = indirectAsKey(field.Value(m.strct))
break break
} }
} }
@ -144,19 +144,27 @@ func baseValues(model TableModel, fields []*schema.Field) map[internal.MapKey][]
func modelKey(key []interface{}, strct reflect.Value, fields []*schema.Field) []interface{} { func modelKey(key []interface{}, strct reflect.Value, fields []*schema.Field) []interface{} {
for _, f := range fields { for _, f := range fields {
key = append(key, indirectFieldValue(f.Value(strct))) key = append(key, indirectAsKey(f.Value(strct)))
} }
return key return key
} }
// indirectFieldValue return the field value dereferencing the pointer if necessary. // indirectAsKey return the field value dereferencing the pointer if necessary.
// The value is then used as a map key. // The value is then used as a map key.
func indirectFieldValue(field reflect.Value) interface{} { func indirectAsKey(field reflect.Value) interface{} {
if field.Kind() != reflect.Ptr { if field.Kind() != reflect.Ptr {
i := field.Interface() i := field.Interface()
if valuer, ok := i.(driver.Valuer); ok { if valuer, ok := i.(driver.Valuer); ok {
if v, err := valuer.Value(); err == nil { if v, err := valuer.Value(); err == nil {
return v switch reflect.TypeOf(v).Kind() {
case reflect.Array, reflect.Chan, reflect.Func,
reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
// NOTE #1107, these types cannot be used as map key,
// let us use original logic.
return i
default:
return v
}
} }
} }
return i return i

View file

@ -103,7 +103,7 @@ func (m *m2mModel) scanM2MColumn(column string, src interface{}) error {
if err := field.Scan(dest, src); err != nil { if err := field.Scan(dest, src); err != nil {
return err return err
} }
m.structKey = append(m.structKey, indirectFieldValue(dest)) m.structKey = append(m.structKey, indirectAsKey(dest))
break break
} }
} }

View file

@ -1,6 +1,6 @@
{ {
"name": "gobun", "name": "gobun",
"version": "1.2.8", "version": "1.2.9",
"main": "index.js", "main": "index.js",
"repository": "git@github.com:uptrace/bun.git", "repository": "git@github.com:uptrace/bun.git",
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>", "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",

View file

@ -24,7 +24,7 @@
type withQuery struct { type withQuery struct {
name string name string
query schema.QueryAppender query Query
recursive bool recursive bool
} }
@ -114,8 +114,16 @@ func (q *baseQuery) DB() *DB {
return q.db return q.db
} }
func (q *baseQuery) GetConn() IConn { func (q *baseQuery) resolveConn(query Query) IConn {
return q.conn if q.conn != nil {
return q.conn
}
if q.db.resolver != nil {
if conn := q.db.resolver.ResolveConn(query); conn != nil {
return conn
}
}
return q.db.DB
} }
func (q *baseQuery) GetModel() Model { func (q *baseQuery) GetModel() Model {
@ -128,10 +136,8 @@ func (q *baseQuery) GetTableName() string {
} }
for _, wq := range q.with { for _, wq := range q.with {
if v, ok := wq.query.(Query); ok { if model := wq.query.GetModel(); model != nil {
if model := v.GetModel(); model != nil { return wq.query.GetTableName()
return v.GetTableName()
}
} }
} }
@ -249,7 +255,7 @@ func (q *baseQuery) isSoftDelete() bool {
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
func (q *baseQuery) addWith(name string, query schema.QueryAppender, recursive bool) { func (q *baseQuery) addWith(name string, query Query, recursive bool) {
q.with = append(q.with, withQuery{ q.with = append(q.with, withQuery{
name: name, name: name,
query: query, query: query,
@ -565,28 +571,33 @@ func (q *baseQuery) scan(
hasDest bool, hasDest bool,
) (sql.Result, error) { ) (sql.Result, error) {
ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, query, q.model) ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, query, q.model)
res, err := q._scan(ctx, iquery, query, model, hasDest)
q.db.afterQuery(ctx, event, res, err)
return res, err
}
rows, err := q.conn.QueryContext(ctx, query) func (q *baseQuery) _scan(
ctx context.Context,
iquery Query,
query string,
model Model,
hasDest bool,
) (sql.Result, error) {
rows, err := q.resolveConn(iquery).QueryContext(ctx, query)
if err != nil { if err != nil {
q.db.afterQuery(ctx, event, nil, err)
return nil, err return nil, err
} }
defer rows.Close() defer rows.Close()
numRow, err := model.ScanRows(ctx, rows) numRow, err := model.ScanRows(ctx, rows)
if err != nil { if err != nil {
q.db.afterQuery(ctx, event, nil, err)
return nil, err return nil, err
} }
if numRow == 0 && hasDest && isSingleRowModel(model) { if numRow == 0 && hasDest && isSingleRowModel(model) {
err = sql.ErrNoRows return nil, sql.ErrNoRows
} }
return driver.RowsAffected(numRow), nil
res := driver.RowsAffected(numRow)
q.db.afterQuery(ctx, event, res, err)
return res, err
} }
func (q *baseQuery) exec( func (q *baseQuery) exec(
@ -595,7 +606,7 @@ func (q *baseQuery) exec(
query string, query string,
) (sql.Result, error) { ) (sql.Result, error) {
ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, query, q.model) ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, query, q.model)
res, err := q.conn.ExecContext(ctx, query) res, err := q.resolveConn(iquery).ExecContext(ctx, query)
q.db.afterQuery(ctx, event, res, err) q.db.afterQuery(ctx, event, res, err)
return res, err return res, err
} }

View file

@ -5,6 +5,7 @@
"database/sql" "database/sql"
"fmt" "fmt"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal" "github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema" "github.com/uptrace/bun/schema"
) )
@ -21,8 +22,7 @@ type AddColumnQuery struct {
func NewAddColumnQuery(db *DB) *AddColumnQuery { func NewAddColumnQuery(db *DB) *AddColumnQuery {
q := &AddColumnQuery{ q := &AddColumnQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
} }
return q return q
@ -133,6 +133,10 @@ func (q *AddColumnQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
func (q *AddColumnQuery) Exec(ctx context.Context, dest ...interface{}) (sql.Result, error) { func (q *AddColumnQuery) Exec(ctx context.Context, dest ...interface{}) (sql.Result, error) {
if q.ifNotExists && !q.hasFeature(feature.AlterColumnExists) {
return nil, feature.NewNotSupportError(feature.AlterColumnExists)
}
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes()) queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -20,8 +20,7 @@ type DropColumnQuery struct {
func NewDropColumnQuery(db *DB) *DropColumnQuery { func NewDropColumnQuery(db *DB) *DropColumnQuery {
q := &DropColumnQuery{ q := &DropColumnQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
} }
return q return q

View file

@ -25,8 +25,7 @@ func NewDeleteQuery(db *DB) *DeleteQuery {
q := &DeleteQuery{ q := &DeleteQuery{
whereBaseQuery: whereBaseQuery{ whereBaseQuery: whereBaseQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
}, },
} }
@ -58,12 +57,12 @@ func (q *DeleteQuery) Apply(fns ...func(*DeleteQuery) *DeleteQuery) *DeleteQuery
return q return q
} }
func (q *DeleteQuery) With(name string, query schema.QueryAppender) *DeleteQuery { func (q *DeleteQuery) With(name string, query Query) *DeleteQuery {
q.addWith(name, query, false) q.addWith(name, query, false)
return q return q
} }
func (q *DeleteQuery) WithRecursive(name string, query schema.QueryAppender) *DeleteQuery { func (q *DeleteQuery) WithRecursive(name string, query Query) *DeleteQuery {
q.addWith(name, query, true) q.addWith(name, query, true)
return q return q
} }
@ -128,7 +127,7 @@ func (q *DeleteQuery) WhereAllWithDeleted() *DeleteQuery {
func (q *DeleteQuery) Order(orders ...string) *DeleteQuery { func (q *DeleteQuery) Order(orders ...string) *DeleteQuery {
if !q.hasFeature(feature.DeleteOrderLimit) { if !q.hasFeature(feature.DeleteOrderLimit) {
q.err = errors.New("bun: order is not supported for current dialect") q.err = feature.NewNotSupportError(feature.DeleteOrderLimit)
return q return q
} }
q.addOrder(orders...) q.addOrder(orders...)
@ -137,7 +136,7 @@ func (q *DeleteQuery) Order(orders ...string) *DeleteQuery {
func (q *DeleteQuery) OrderExpr(query string, args ...interface{}) *DeleteQuery { func (q *DeleteQuery) OrderExpr(query string, args ...interface{}) *DeleteQuery {
if !q.hasFeature(feature.DeleteOrderLimit) { if !q.hasFeature(feature.DeleteOrderLimit) {
q.err = errors.New("bun: order is not supported for current dialect") q.err = feature.NewNotSupportError(feature.DeleteOrderLimit)
return q return q
} }
q.addOrderExpr(query, args...) q.addOrderExpr(query, args...)
@ -152,7 +151,7 @@ func (q *DeleteQuery) ForceDelete() *DeleteQuery {
// ------------------------------------------------------------------------------ // ------------------------------------------------------------------------------
func (q *DeleteQuery) Limit(n int) *DeleteQuery { func (q *DeleteQuery) Limit(n int) *DeleteQuery {
if !q.hasFeature(feature.DeleteOrderLimit) { if !q.hasFeature(feature.DeleteOrderLimit) {
q.err = errors.New("bun: limit is not supported for current dialect") q.err = feature.NewNotSupportError(feature.DeleteOrderLimit)
return q return q
} }
q.setLimit(n) q.setLimit(n)
@ -166,7 +165,7 @@ func (q *DeleteQuery) Limit(n int) *DeleteQuery {
// To suppress the auto-generated RETURNING clause, use `Returning("NULL")`. // To suppress the auto-generated RETURNING clause, use `Returning("NULL")`.
func (q *DeleteQuery) Returning(query string, args ...interface{}) *DeleteQuery { func (q *DeleteQuery) Returning(query string, args ...interface{}) *DeleteQuery {
if !q.hasFeature(feature.DeleteReturning) { if !q.hasFeature(feature.DeleteReturning) {
q.err = errors.New("bun: returning is not supported for current dialect") q.err = feature.NewNotSupportError(feature.DeleteOrderLimit)
return q return q
} }

View file

@ -29,8 +29,7 @@ func NewCreateIndexQuery(db *DB) *CreateIndexQuery {
q := &CreateIndexQuery{ q := &CreateIndexQuery{
whereBaseQuery: whereBaseQuery{ whereBaseQuery: whereBaseQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
}, },
} }

View file

@ -24,8 +24,7 @@ type DropIndexQuery struct {
func NewDropIndexQuery(db *DB) *DropIndexQuery { func NewDropIndexQuery(db *DB) *DropIndexQuery {
q := &DropIndexQuery{ q := &DropIndexQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
} }
return q return q

View file

@ -31,8 +31,7 @@ func NewInsertQuery(db *DB) *InsertQuery {
q := &InsertQuery{ q := &InsertQuery{
whereBaseQuery: whereBaseQuery{ whereBaseQuery: whereBaseQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
}, },
} }
@ -64,12 +63,12 @@ func (q *InsertQuery) Apply(fns ...func(*InsertQuery) *InsertQuery) *InsertQuery
return q return q
} }
func (q *InsertQuery) With(name string, query schema.QueryAppender) *InsertQuery { func (q *InsertQuery) With(name string, query Query) *InsertQuery {
q.addWith(name, query, false) q.addWith(name, query, false)
return q return q
} }
func (q *InsertQuery) WithRecursive(name string, query schema.QueryAppender) *InsertQuery { func (q *InsertQuery) WithRecursive(name string, query Query) *InsertQuery {
q.addWith(name, query, true) q.addWith(name, query, true)
return q return q
} }

View file

@ -26,8 +26,7 @@ type MergeQuery struct {
func NewMergeQuery(db *DB) *MergeQuery { func NewMergeQuery(db *DB) *MergeQuery {
q := &MergeQuery{ q := &MergeQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
} }
if q.db.dialect.Name() != dialect.MSSQL && q.db.dialect.Name() != dialect.PG { if q.db.dialect.Name() != dialect.MSSQL && q.db.dialect.Name() != dialect.PG {
@ -61,12 +60,12 @@ func (q *MergeQuery) Apply(fns ...func(*MergeQuery) *MergeQuery) *MergeQuery {
return q return q
} }
func (q *MergeQuery) With(name string, query schema.QueryAppender) *MergeQuery { func (q *MergeQuery) With(name string, query Query) *MergeQuery {
q.addWith(name, query, false) q.addWith(name, query, false)
return q return q
} }
func (q *MergeQuery) WithRecursive(name string, query schema.QueryAppender) *MergeQuery { func (q *MergeQuery) WithRecursive(name string, query Query) *MergeQuery {
q.addWith(name, query, true) q.addWith(name, query, true)
return q return q
} }

View file

@ -15,23 +15,10 @@ type RawQuery struct {
comment string comment string
} }
// Deprecated: Use NewRaw instead. When add it to IDB, it conflicts with the sql.Conn#Raw
func (db *DB) Raw(query string, args ...interface{}) *RawQuery {
return &RawQuery{
baseQuery: baseQuery{
db: db,
conn: db.DB,
},
query: query,
args: args,
}
}
func NewRawQuery(db *DB, query string, args ...interface{}) *RawQuery { func NewRawQuery(db *DB, query string, args ...interface{}) *RawQuery {
return &RawQuery{ return &RawQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
query: query, query: query,
args: args, args: args,

View file

@ -41,8 +41,7 @@ func NewSelectQuery(db *DB) *SelectQuery {
return &SelectQuery{ return &SelectQuery{
whereBaseQuery: whereBaseQuery{ whereBaseQuery: whereBaseQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
}, },
} }
@ -73,12 +72,12 @@ func (q *SelectQuery) Apply(fns ...func(*SelectQuery) *SelectQuery) *SelectQuery
return q return q
} }
func (q *SelectQuery) With(name string, query schema.QueryAppender) *SelectQuery { func (q *SelectQuery) With(name string, query Query) *SelectQuery {
q.addWith(name, query, false) q.addWith(name, query, false)
return q return q
} }
func (q *SelectQuery) WithRecursive(name string, query schema.QueryAppender) *SelectQuery { func (q *SelectQuery) WithRecursive(name string, query Query) *SelectQuery {
q.addWith(name, query, true) q.addWith(name, query, true)
return q return q
} }
@ -537,6 +536,13 @@ func (q *SelectQuery) appendQuery(
return nil, err return nil, err
} }
if err := q.forEachInlineRelJoin(func(j *relationJoin) error {
j.applyTo(q)
return nil
}); err != nil {
return nil, err
}
b = append(b, "SELECT "...) b = append(b, "SELECT "...)
if len(q.distinctOn) > 0 { if len(q.distinctOn) > 0 {
@ -730,8 +736,6 @@ func (q *SelectQuery) appendColumns(fmter schema.Formatter, b []byte) (_ []byte,
func (q *SelectQuery) appendInlineRelColumns( func (q *SelectQuery) appendInlineRelColumns(
fmter schema.Formatter, b []byte, join *relationJoin, fmter schema.Formatter, b []byte, join *relationJoin,
) (_ []byte, err error) { ) (_ []byte, err error) {
join.applyTo(q)
if join.columns != nil { if join.columns != nil {
table := join.JoinModel.Table() table := join.JoinModel.Table()
for i, col := range join.columns { for i, col := range join.columns {
@ -795,7 +799,7 @@ func (q *SelectQuery) Rows(ctx context.Context) (*sql.Rows, error) {
query := internal.String(queryBytes) query := internal.String(queryBytes)
ctx, event := q.db.beforeQuery(ctx, q, query, nil, query, q.model) ctx, event := q.db.beforeQuery(ctx, q, query, nil, query, q.model)
rows, err := q.conn.QueryContext(ctx, query) rows, err := q.resolveConn(q).QueryContext(ctx, query)
q.db.afterQuery(ctx, event, nil, err) q.db.afterQuery(ctx, event, nil, err)
return rows, err return rows, err
} }
@ -931,7 +935,7 @@ func (q *SelectQuery) Count(ctx context.Context) (int, error) {
ctx, event := q.db.beforeQuery(ctx, qq, query, nil, query, q.model) ctx, event := q.db.beforeQuery(ctx, qq, query, nil, query, q.model)
var num int var num int
err = q.conn.QueryRowContext(ctx, query).Scan(&num) err = q.resolveConn(q).QueryRowContext(ctx, query).Scan(&num)
q.db.afterQuery(ctx, event, nil, err) q.db.afterQuery(ctx, event, nil, err)
@ -949,13 +953,15 @@ func (q *SelectQuery) ScanAndCount(ctx context.Context, dest ...interface{}) (in
return int(n), nil return int(n), nil
} }
} }
if _, ok := q.conn.(*DB); ok { if q.conn == nil {
return q.scanAndCountConc(ctx, dest...) return q.scanAndCountConcurrently(ctx, dest...)
} }
return q.scanAndCountSeq(ctx, dest...) return q.scanAndCountSeq(ctx, dest...)
} }
func (q *SelectQuery) scanAndCountConc(ctx context.Context, dest ...interface{}) (int, error) { func (q *SelectQuery) scanAndCountConcurrently(
ctx context.Context, dest ...interface{},
) (int, error) {
var count int var count int
var wg sync.WaitGroup var wg sync.WaitGroup
var mu sync.Mutex var mu sync.Mutex
@ -1033,7 +1039,7 @@ func (q *SelectQuery) selectExists(ctx context.Context) (bool, error) {
ctx, event := q.db.beforeQuery(ctx, qq, query, nil, query, q.model) ctx, event := q.db.beforeQuery(ctx, qq, query, nil, query, q.model)
var exists bool var exists bool
err = q.conn.QueryRowContext(ctx, query).Scan(&exists) err = q.resolveConn(q).QueryRowContext(ctx, query).Scan(&exists)
q.db.afterQuery(ctx, event, nil, err) q.db.afterQuery(ctx, event, nil, err)

View file

@ -40,8 +40,7 @@ type CreateTableQuery struct {
func NewCreateTableQuery(db *DB) *CreateTableQuery { func NewCreateTableQuery(db *DB) *CreateTableQuery {
q := &CreateTableQuery{ q := &CreateTableQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
varchar: db.Dialect().DefaultVarcharLen(), varchar: db.Dialect().DefaultVarcharLen(),
} }

View file

@ -21,8 +21,7 @@ type DropTableQuery struct {
func NewDropTableQuery(db *DB) *DropTableQuery { func NewDropTableQuery(db *DB) *DropTableQuery {
q := &DropTableQuery{ q := &DropTableQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
} }
return q return q

View file

@ -22,8 +22,7 @@ type TruncateTableQuery struct {
func NewTruncateTableQuery(db *DB) *TruncateTableQuery { func NewTruncateTableQuery(db *DB) *TruncateTableQuery {
q := &TruncateTableQuery{ q := &TruncateTableQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
} }
return q return q

View file

@ -32,8 +32,7 @@ func NewUpdateQuery(db *DB) *UpdateQuery {
q := &UpdateQuery{ q := &UpdateQuery{
whereBaseQuery: whereBaseQuery{ whereBaseQuery: whereBaseQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
}, },
} }
@ -65,12 +64,12 @@ func (q *UpdateQuery) Apply(fns ...func(*UpdateQuery) *UpdateQuery) *UpdateQuery
return q return q
} }
func (q *UpdateQuery) With(name string, query schema.QueryAppender) *UpdateQuery { func (q *UpdateQuery) With(name string, query Query) *UpdateQuery {
q.addWith(name, query, false) q.addWith(name, query, false)
return q return q
} }
func (q *UpdateQuery) WithRecursive(name string, query schema.QueryAppender) *UpdateQuery { func (q *UpdateQuery) WithRecursive(name string, query Query) *UpdateQuery {
q.addWith(name, query, true) q.addWith(name, query, true)
return q return q
} }
@ -207,7 +206,7 @@ func (q *UpdateQuery) WhereAllWithDeleted() *UpdateQuery {
// ------------------------------------------------------------------------------ // ------------------------------------------------------------------------------
func (q *UpdateQuery) Order(orders ...string) *UpdateQuery { func (q *UpdateQuery) Order(orders ...string) *UpdateQuery {
if !q.hasFeature(feature.UpdateOrderLimit) { if !q.hasFeature(feature.UpdateOrderLimit) {
q.err = errors.New("bun: order is not supported for current dialect") q.err = feature.NewNotSupportError(feature.UpdateOrderLimit)
return q return q
} }
q.addOrder(orders...) q.addOrder(orders...)
@ -216,7 +215,7 @@ func (q *UpdateQuery) Order(orders ...string) *UpdateQuery {
func (q *UpdateQuery) OrderExpr(query string, args ...interface{}) *UpdateQuery { func (q *UpdateQuery) OrderExpr(query string, args ...interface{}) *UpdateQuery {
if !q.hasFeature(feature.UpdateOrderLimit) { if !q.hasFeature(feature.UpdateOrderLimit) {
q.err = errors.New("bun: order is not supported for current dialect") q.err = feature.NewNotSupportError(feature.UpdateOrderLimit)
return q return q
} }
q.addOrderExpr(query, args...) q.addOrderExpr(query, args...)
@ -225,7 +224,7 @@ func (q *UpdateQuery) OrderExpr(query string, args ...interface{}) *UpdateQuery
func (q *UpdateQuery) Limit(n int) *UpdateQuery { func (q *UpdateQuery) Limit(n int) *UpdateQuery {
if !q.hasFeature(feature.UpdateOrderLimit) { if !q.hasFeature(feature.UpdateOrderLimit) {
q.err = errors.New("bun: limit is not supported for current dialect") q.err = feature.NewNotSupportError(feature.UpdateOrderLimit)
return q return q
} }
q.setLimit(n) q.setLimit(n)

View file

@ -25,8 +25,7 @@ type ValuesQuery struct {
func NewValuesQuery(db *DB, model interface{}) *ValuesQuery { func NewValuesQuery(db *DB, model interface{}) *ValuesQuery {
q := &ValuesQuery{ q := &ValuesQuery{
baseQuery: baseQuery{ baseQuery: baseQuery{
db: db, db: db,
conn: db.DB,
}, },
} }
q.setModel(model) q.setModel(model)

View file

@ -2,5 +2,5 @@
// Version is the current release version. // Version is the current release version.
func Version() string { func Version() string {
return "1.2.8" return "1.2.9"
} }

16
vendor/modules.txt vendored
View file

@ -589,7 +589,7 @@ github.com/prometheus/common/model
github.com/prometheus/procfs github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util github.com/prometheus/procfs/internal/util
# github.com/puzpuzpuz/xsync/v3 v3.4.0 # github.com/puzpuzpuz/xsync/v3 v3.5.0
## explicit; go 1.18 ## explicit; go 1.18
github.com/puzpuzpuz/xsync/v3 github.com/puzpuzpuz/xsync/v3
# github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b # github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b
@ -939,7 +939,7 @@ github.com/ugorji/go/codec
github.com/ulule/limiter/v3 github.com/ulule/limiter/v3
github.com/ulule/limiter/v3/drivers/store/common github.com/ulule/limiter/v3/drivers/store/common
github.com/ulule/limiter/v3/drivers/store/memory github.com/ulule/limiter/v3/drivers/store/memory
# github.com/uptrace/bun v1.2.8 # github.com/uptrace/bun v1.2.9
## explicit; go 1.22.0 ## explicit; go 1.22.0
github.com/uptrace/bun github.com/uptrace/bun
github.com/uptrace/bun/dialect github.com/uptrace/bun/dialect
@ -953,13 +953,13 @@ github.com/uptrace/bun/internal/tagparser
github.com/uptrace/bun/migrate github.com/uptrace/bun/migrate
github.com/uptrace/bun/migrate/sqlschema github.com/uptrace/bun/migrate/sqlschema
github.com/uptrace/bun/schema github.com/uptrace/bun/schema
# github.com/uptrace/bun/dialect/pgdialect v1.2.8 # github.com/uptrace/bun/dialect/pgdialect v1.2.9
## explicit; go 1.22.0 ## explicit; go 1.22.0
github.com/uptrace/bun/dialect/pgdialect github.com/uptrace/bun/dialect/pgdialect
# github.com/uptrace/bun/dialect/sqlitedialect v1.2.8 # github.com/uptrace/bun/dialect/sqlitedialect v1.2.9
## explicit; go 1.22.0 ## explicit; go 1.22.0
github.com/uptrace/bun/dialect/sqlitedialect github.com/uptrace/bun/dialect/sqlitedialect
# github.com/uptrace/bun/extra/bunotel v1.2.8 # github.com/uptrace/bun/extra/bunotel v1.2.9
## explicit; go 1.22.0 ## explicit; go 1.22.0
github.com/uptrace/bun/extra/bunotel github.com/uptrace/bun/extra/bunotel
# github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 # github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2
@ -997,7 +997,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw
go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/bsontype
go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/bson/primitive
go.mongodb.org/mongo-driver/x/bsonx/bsoncore go.mongodb.org/mongo-driver/x/bsonx/bsoncore
# go.opentelemetry.io/otel v1.33.0 => go.opentelemetry.io/otel v1.29.0 # go.opentelemetry.io/otel v1.34.0 => go.opentelemetry.io/otel v1.29.0
## explicit; go 1.21 ## explicit; go 1.21
go.opentelemetry.io/otel go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/attribute
@ -1038,7 +1038,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
# go.opentelemetry.io/otel/exporters/prometheus v0.51.0 # go.opentelemetry.io/otel/exporters/prometheus v0.51.0
## explicit; go 1.21 ## explicit; go 1.21
go.opentelemetry.io/otel/exporters/prometheus go.opentelemetry.io/otel/exporters/prometheus
# go.opentelemetry.io/otel/metric v1.33.0 => go.opentelemetry.io/otel/metric v1.29.0 # go.opentelemetry.io/otel/metric v1.34.0 => go.opentelemetry.io/otel/metric v1.29.0
## explicit; go 1.21 ## explicit; go 1.21
go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/embedded
@ -1059,7 +1059,7 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate
go.opentelemetry.io/otel/sdk/metric/internal/exemplar go.opentelemetry.io/otel/sdk/metric/internal/exemplar
go.opentelemetry.io/otel/sdk/metric/internal/x go.opentelemetry.io/otel/sdk/metric/internal/x
go.opentelemetry.io/otel/sdk/metric/metricdata go.opentelemetry.io/otel/sdk/metric/metricdata
# go.opentelemetry.io/otel/trace v1.33.0 => go.opentelemetry.io/otel/trace v1.29.0 # go.opentelemetry.io/otel/trace v1.34.0 => go.opentelemetry.io/otel/trace v1.29.0
## explicit; go 1.21 ## explicit; go 1.21
go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/embedded