[chore]: Bump go.opentelemetry.io/otel/sdk from 1.29.0 to 1.32.0
Bumps [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) from 1.29.0 to 1.32.0.
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.32.0)

---
updated-dependencies:
- dependency-name: go.opentelemetry.io/otel/sdk
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
parent 98eef328ea
commit bb8dfd3368
go.mod (8 lines changed)

@@ -69,14 +69,14 @@ require (
 	github.com/uptrace/bun/extra/bunotel v1.2.5
 	github.com/wagslane/go-password-validator v0.3.0
 	github.com/yuin/goldmark v1.7.8
-	go.opentelemetry.io/otel v1.31.0
+	go.opentelemetry.io/otel v1.32.0
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0
 	go.opentelemetry.io/otel/exporters/prometheus v0.51.0
-	go.opentelemetry.io/otel/metric v1.31.0
-	go.opentelemetry.io/otel/sdk v1.29.0
+	go.opentelemetry.io/otel/metric v1.32.0
+	go.opentelemetry.io/otel/sdk v1.32.0
 	go.opentelemetry.io/otel/sdk/metric v1.29.0
-	go.opentelemetry.io/otel/trace v1.31.0
+	go.opentelemetry.io/otel/trace v1.32.0
 	go.uber.org/automaxprocs v1.6.0
 	golang.org/x/crypto v0.29.0
 	golang.org/x/image v0.21.0
go.sum (4 lines changed, generated)

@@ -644,8 +644,8 @@ go.opentelemetry.io/otel/exporters/prometheus v0.51.0 h1:G7uexXb/K3T+T9fNLCCKncw
 go.opentelemetry.io/otel/exporters/prometheus v0.51.0/go.mod h1:v0mFe5Kk7woIh938mrZBJBmENYquyA0IICrlYm4Y0t4=
 go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
 go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
-go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
-go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
+go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
+go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
 go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
 go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
 go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go (4 lines changed, generated, vendored)

@@ -3,6 +3,8 @@
 
 package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
 
+import "go.opentelemetry.io/otel/attribute"
+
 // Scope represents the instrumentation scope.
 type Scope struct {
 	// Name is the name of the instrumentation scope. This should be the
@@ -12,4 +14,6 @@ type Scope struct {
 	Version string
 	// SchemaURL of the telemetry emitted by the scope.
 	SchemaURL string
+	// Attributes of the telemetry emitted by the scope.
+	Attributes attribute.Set
 }
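The new Attributes field lets a Scope carry instrumentation-level attributes as an immutable attribute.Set. A minimal sketch of constructing a Scope with the new field — the name, version, schema URL, and attribute key below are illustrative, not taken from this commit:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/instrumentation"
)

func main() {
	// attribute.NewSet builds the immutable attribute.Set the new field expects.
	scope := instrumentation.Scope{
		Name:       "example/instrumentation", // illustrative scope name
		Version:    "v0.1.0",
		SchemaURL:  "https://opentelemetry.io/schemas/1.26.0",
		Attributes: attribute.NewSet(attribute.String("tenant", "demo")),
	}
	fmt.Println(scope.Name, scope.Attributes.Len())
}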
vendor/go.opentelemetry.io/otel/sdk/resource/auto.go (50 lines changed, generated, vendored)

@@ -7,7 +7,6 @@
 	"context"
 	"errors"
 	"fmt"
-	"strings"
 )
 
 // ErrPartialResource is returned by a detector when complete source
@@ -58,61 +57,36 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
 func detect(ctx context.Context, res *Resource, detectors []Detector) error {
 	var (
-		r    *Resource
-		errs detectErrs
-		err  error
+		r   *Resource
+		err error
+		e   error
 	)
 
 	for _, detector := range detectors {
 		if detector == nil {
 			continue
 		}
-		r, err = detector.Detect(ctx)
-		if err != nil {
-			errs = append(errs, err)
-			if !errors.Is(err, ErrPartialResource) {
+		r, e = detector.Detect(ctx)
+		if e != nil {
+			err = errors.Join(err, e)
+			if !errors.Is(e, ErrPartialResource) {
 				continue
 			}
 		}
-		r, err = Merge(res, r)
-		if err != nil {
-			errs = append(errs, err)
+		r, e = Merge(res, r)
+		if e != nil {
+			err = errors.Join(err, e)
 		}
 		*res = *r
 	}
 
-	if len(errs) == 0 {
-		return nil
-	}
-	if errors.Is(errs, ErrSchemaURLConflict) {
-		// If there has been a merge conflict, ensure the resource has no
-		// schema URL.
-		res.schemaURL = ""
-	}
-	return errs
-}
-
-type detectErrs []error
-
-func (e detectErrs) Error() string {
-	errStr := make([]string, len(e))
-	for i, err := range e {
-		errStr[i] = fmt.Sprintf("* %s", err)
-	}
-
-	format := "%d errors occurred detecting resource:\n\t%s"
-	return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t"))
-}
-
-func (e detectErrs) Unwrap() error {
-	switch len(e) {
-	case 0:
-		return nil
-	case 1:
-		return e[0]
-	}
-	return e[1:]
-}
-
-func (e detectErrs) Is(target error) bool {
-	return len(e) != 0 && errors.Is(e[0], target)
+	if err != nil {
+		if errors.Is(err, ErrSchemaURLConflict) {
+			// If there has been a merge conflict, ensure the resource has no
+			// schema URL.
+			res.schemaURL = ""
+		}
+
+		err = fmt.Errorf("error detecting resource: %w", err)
+	}
+	return err
 }
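The rewrite retires the hand-rolled detectErrs multi-error type in favor of the standard library's errors.Join (Go 1.20+). Unlike the old detectErrs.Is, which only matched against the first accumulated error, errors.Is on a joined error matches any of them. A self-contained sketch of the accumulation pattern, with errPartial standing in for ErrPartialResource:

package main

import (
	"errors"
	"fmt"
)

var errPartial = errors.New("partial resource") // stand-in for ErrPartialResource

func main() {
	var err error
	// Accumulate errors the way the new detect loop does.
	for _, e := range []error{errPartial, errors.New("merge failed")} {
		err = errors.Join(err, e)
	}
	// errors.Is matches any joined error, not just the first.
	fmt.Println(errors.Is(err, errPartial)) // true
	fmt.Println(err)                        // both messages, newline-separated
}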
vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go (6 lines changed, generated, vendored)

@@ -20,15 +20,13 @@
 	// telemetrySDK is a Detector that provides information about
 	// the OpenTelemetry SDK used. This Detector is included as a
 	// builtin. If these resource attributes are not wanted, use
-	// the WithTelemetrySDK(nil) or WithoutBuiltin() options to
-	// explicitly disable them.
+	// resource.New() to explicitly disable them.
 	telemetrySDK struct{}
 
 	// host is a Detector that provides information about the host
 	// being run on. This Detector is included as a builtin. If
 	// these resource attributes are not wanted, use the
-	// WithHost(nil) or WithoutBuiltin() options to explicitly
-	// disable them.
+	// resource.New() to explicitly disable them.
 	host struct{}
 
 	stringDetector struct {
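The updated comments point at resource.New() as the way to opt out of the builtin detectors: it starts from an empty resource and runs only the detectors you select. A hedged sketch — the attribute shown is illustrative:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Omitting resource.WithTelemetrySDK() and resource.WithHost() here
	// means neither builtin detector runs, so their attributes are absent.
	res, err := resource.New(context.Background(),
		resource.WithAttributes(attribute.String("service.name", "demo")),
	)
	if err != nil {
		fmt.Println("detect error:", err)
	}
	fmt.Println(res)
}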
vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go (7 lines changed, generated, vendored)

@@ -10,17 +10,16 @@
 	"golang.org/x/sys/windows/registry"
 )
 
-// implements hostIDReader
+// implements hostIDReader.
 type hostIDReaderWindows struct{}
 
-// read reads MachineGuid from the windows registry key:
-// SOFTWARE\Microsoft\Cryptography
+// read reads MachineGuid from the Windows registry key:
+// SOFTWARE\Microsoft\Cryptography.
 func (*hostIDReaderWindows) read() (string, error) {
 	k, err := registry.OpenKey(
 		registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
 		registry.QUERY_VALUE|registry.WOW64_64KEY,
 	)
-
 	if err != nil {
 		return "", err
 	}
vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go (1 line changed, generated, vendored)

@@ -17,7 +17,6 @@
 func platformOSDescription() (string, error) {
 	k, err := registry.OpenKey(
 		registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
-
 	if err != nil {
 		return "", err
 	}
vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go (7 lines changed, generated, vendored)

@@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
 		//
 		// It is up to the exporter to implement any type of retry logic if a batch is failing
 		// to be exported, since it is specific to the protocol and backend being sent to.
+		clear(bsp.batch) // Erase elements to let GC collect objects
 		bsp.batch = bsp.batch[:0]
 
 		if err != nil {
@@ -316,7 +317,11 @@ func (bsp *batchSpanProcessor) processQueue() {
 			bsp.batchMutex.Unlock()
 			if shouldExport {
 				if !bsp.timer.Stop() {
-					<-bsp.timer.C
+					// Handle both GODEBUG=asynctimerchan=[0|1] properly.
+					select {
+					case <-bsp.timer.C:
+					default:
+					}
 				}
 				if err := bsp.exportSpans(ctx); err != nil {
 					otel.Handle(err)
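The old drain, <-bsp.timer.C, assumed a fired timer always leaves a value buffered in its channel. Under Go 1.23's new timer semantics (GODEBUG=asynctimerchan=0) the channel is unbuffered and Stop clears any pending delivery, so that blocking receive could hang the queue processor forever. The non-blocking select drains a buffered tick under the old semantics and falls through under the new ones. A standalone sketch of the pattern:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(10 * time.Millisecond)
	time.Sleep(20 * time.Millisecond) // let the timer fire unobserved

	if !t.Stop() { // Stop reports false if the timer already fired
		select {
		case <-t.C: // old semantics: a stale tick is buffered, drain it
		default: // Go 1.23+ semantics: channel is empty, don't block
		}
	}
	t.Reset(10 * time.Millisecond)
	fmt.Println("timer safely reset:", <-t.C)
}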
vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go (11 lines changed, generated, vendored)

@@ -15,14 +15,15 @@ type evictedQueue[T any] struct {
 	queue        []T
 	capacity     int
 	droppedCount int
-	logDropped   func()
+	logDroppedMsg  string
+	logDroppedOnce sync.Once
 }
 
 func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
 	// Do not pre-allocate queue, do this lazily.
 	return evictedQueue[Event]{
 		capacity:   capacity,
-		logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }),
+		logDroppedMsg: "limit reached: dropping trace trace.Event",
 	}
 }
 
@@ -30,7 +31,7 @@ func newEvictedQueueLink(capacity int) evictedQueue[Link] {
 	// Do not pre-allocate queue, do this lazily.
 	return evictedQueue[Link]{
 		capacity:   capacity,
-		logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }),
+		logDroppedMsg: "limit reached: dropping trace trace.Link",
 	}
 }
 
@@ -53,6 +54,10 @@ func (eq *evictedQueue[T]) add(value T) {
 	eq.queue = append(eq.queue, value)
 }
 
+func (eq *evictedQueue[T]) logDropped() {
+	eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) })
+}
+
 // copy returns a copy of the evictedQueue.
 func (eq *evictedQueue[T]) copy() []T {
 	return slices.Clone(eq.queue)
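Replacing the logDropped func() field with a message string plus a sync.Once avoids pre-building a closure with sync.OnceFunc for every queue; the warning closure is now only constructed if a drop actually happens, and the once-only guarantee is unchanged. A sketch contrasting the two shapes (simplified types, fmt.Println standing in for global.Warn):

package main

import (
	"fmt"
	"sync"
)

// Before: the guard is a pre-built function value.
type queueOld struct {
	logDropped func()
}

// After: the guard is inline state plus the message to log.
type queueNew struct {
	logDroppedMsg  string
	logDroppedOnce sync.Once
}

func (q *queueNew) logDropped() {
	q.logDroppedOnce.Do(func() { fmt.Println(q.logDroppedMsg) })
}

func main() {
	old := queueOld{logDropped: sync.OnceFunc(func() { fmt.Println("dropped (old)") })}
	old.logDropped()
	old.logDropped() // no output: OnceFunc already ran

	q := queueNew{logDroppedMsg: "dropped (new)"}
	q.logDropped()
	q.logDropped() // no output: the sync.Once already fired
}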
vendor/go.opentelemetry.io/otel/sdk/trace/provider.go (3 lines changed, generated, vendored)

@@ -142,6 +142,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
 		Name:      name,
 		Version:   c.InstrumentationVersion(),
 		SchemaURL: c.SchemaURL(),
+		Attributes: c.InstrumentationAttributes(),
 	}
 
 	t, ok := func() (trace.Tracer, bool) {
@@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
 		// slowing down all tracing consumers.
 		// - Logging code may be instrumented with tracing and deadlock because it could try
 		//   acquiring the same non-reentrant mutex.
-		global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL)
+		global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes)
 	}
 	return t
 }
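With the provider now copying c.InstrumentationAttributes() into the Scope, attributes requested by the caller reach the tracer's scope (and the debug log line above). A hedged example of requesting such a tracer through the existing trace.WithInstrumentationAttributes option; the names and attribute are illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()

	// The attributes passed here now end up in instrumentation.Scope.Attributes.
	tracer := tp.Tracer(
		"example/instrumentation", // illustrative scope name
		trace.WithInstrumentationVersion("v0.1.0"),
		trace.WithInstrumentationAttributes(attribute.String("tenant", "demo")),
	)
	_, span := tracer.Start(context.Background(), "op")
	span.End()
}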
vendor/go.opentelemetry.io/otel/sdk/trace/span.go (108 lines changed, generated, vendored)

@@ -174,6 +174,17 @@ func (s *recordingSpan) IsRecording() bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
+	return s.isRecording()
+}
+
+// isRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) isRecording() bool {
+	if s == nil {
+		return false
+	}
 	return s.endTime.IsZero()
 }
 
@@ -182,11 +193,15 @@ func (s *recordingSpan) IsRecording() bool {
 // included in the set status when the code is for an error. If this span is
 // not being recorded than this method does nothing.
 func (s *recordingSpan) SetStatus(code codes.Code, description string) {
-	if !s.IsRecording() {
+	if s == nil {
 		return
 	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
 	if s.status.Code > code {
 		return
 	}
@@ -210,12 +225,15 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) {
 // attributes the span is configured to have, the last added attributes will
 // be dropped.
 func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
-	if !s.IsRecording() {
+	if s == nil || len(attributes) == 0 {
 		return
 	}
 
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+
 	limit := s.tracer.provider.spanLimits.AttributeCountLimit
 	if limit == 0 {
@@ -233,7 +251,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
 
 	// Otherwise, add without deduplication. When attributes are read they
 	// will be deduplicated, optimizing the operation.
-	s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes))
+	s.attributes = slices.Grow(s.attributes, len(attributes))
 	for _, a := range attributes {
 		if !a.Valid() {
 			// Drop all invalid attributes.
@@ -280,13 +298,17 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
 
 	// Do not set a capacity when creating this map. Benchmark testing has
 	// showed this to only add unused memory allocations in general use.
-	exists := make(map[attribute.Key]int)
-	s.dedupeAttrsFromRecord(&exists)
+	exists := make(map[attribute.Key]int, len(s.attributes))
+	s.dedupeAttrsFromRecord(exists)
 
 	// Now that s.attributes is deduplicated, adding unique attributes up to
 	// the capacity of s will not over allocate s.attributes.
-	sum := len(attrs) + len(s.attributes)
-	s.attributes = slices.Grow(s.attributes, min(sum, limit))
+
+	// max size = limit
+	maxCap := min(len(attrs)+len(s.attributes), limit)
+	if cap(s.attributes) < maxCap {
+		s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes))
+	}
 	for _, a := range attrs {
 		if !a.Valid() {
 			// Drop all invalid attributes.
@@ -296,6 +318,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
 
 		if idx, ok := exists[a.Key]; ok {
 			// Perform all updates before dropping, even when at capacity.
+			a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
 			s.attributes[idx] = a
 			continue
 		}
@@ -386,9 +409,10 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
 	// the span's duration in case some operation below takes a while.
 	et := monotonicEndTime(s.startTime)
 
-	// Do relative expensive check now that we have an end time and see if we
-	// need to do any more processing.
-	if !s.IsRecording() {
+	// Lock the span now that we have an end time and see if we need to do any more processing.
+	s.mu.Lock()
+	if !s.isRecording() {
+		s.mu.Unlock()
 		return
 	}
 
@@ -413,10 +437,11 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
 	}
 
 	if s.executionTracerTaskEnd != nil {
+		s.mu.Unlock()
 		s.executionTracerTaskEnd()
+		s.mu.Lock()
 	}
 
-	s.mu.Lock()
 	// Setting endTime to non-zero marks the span as ended and not recording.
 	if config.Timestamp().IsZero() {
 		s.endTime = et
@@ -450,7 +475,13 @@ func monotonicEndTime(start time.Time) time.Time {
 // does not change the Span status. If this span is not being recorded or err is nil
 // than this method does nothing.
 func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
-	if s == nil || err == nil || !s.IsRecording() {
+	if s == nil || err == nil {
 		return
 	}
 
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+
@@ -486,14 +517,23 @@ func recordStackTrace() string {
 }
 
 // AddEvent adds an event with the provided name and options. If this span is
-// not being recorded than this method does nothing.
+// not being recorded then this method does nothing.
 func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
-	if !s.IsRecording() {
+	if s == nil {
 		return
 	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
 	s.addEvent(name, o...)
 }
 
+// addEvent adds an event with the provided name and options.
+//
+// This method assumes s.mu.Lock is held by the caller.
 func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
 	c := trace.NewEventConfig(o...)
 	e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
@@ -510,20 +550,21 @@ func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
 		e.Attributes = e.Attributes[:limit]
 	}
 
-	s.mu.Lock()
 	s.events.add(e)
-	s.mu.Unlock()
 }
 
 // SetName sets the name of this span. If this span is not being recorded than
 // this method does nothing.
 func (s *recordingSpan) SetName(name string) {
-	if !s.IsRecording() {
+	if s == nil {
 		return
 	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
 	s.name = name
 }
 
@@ -579,29 +620,26 @@ func (s *recordingSpan) Attributes() []attribute.KeyValue {
 func (s *recordingSpan) dedupeAttrs() {
 	// Do not set a capacity when creating this map. Benchmark testing has
 	// showed this to only add unused memory allocations in general use.
-	exists := make(map[attribute.Key]int)
-	s.dedupeAttrsFromRecord(&exists)
+	exists := make(map[attribute.Key]int, len(s.attributes))
+	s.dedupeAttrsFromRecord(exists)
 }
 
 // dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
 // using record as the record of unique attribute keys to their index.
 //
 // This method assumes s.mu.Lock is held by the caller.
-func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
+func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
 	// Use the fact that slices share the same backing array.
 	unique := s.attributes[:0]
 	for _, a := range s.attributes {
-		if idx, ok := (*record)[a.Key]; ok {
+		if idx, ok := record[a.Key]; ok {
 			unique[idx] = a
 		} else {
 			unique = append(unique, a)
-			(*record)[a.Key] = len(unique) - 1
+			record[a.Key] = len(unique) - 1
 		}
 	}
-	// s.attributes have element types of attribute.KeyValue. These types are
-	// not pointers and they themselves do not contain pointer fields,
-	// therefore the duplicate values do not need to be zeroed for them to be
-	// garbage collected.
+	clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects.
 	s.attributes = unique
 }
 
@@ -657,7 +695,7 @@ func (s *recordingSpan) Resource() *resource.Resource {
 }
 
 func (s *recordingSpan) AddLink(link trace.Link) {
-	if !s.IsRecording() {
+	if s == nil {
 		return
 	}
 	if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
@@ -665,6 +703,12 @@ func (s *recordingSpan) AddLink(link trace.Link) {
 		return
 	}
 
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+
 	l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
 
 	// Discard attributes over limit.
@@ -678,9 +722,7 @@ func (s *recordingSpan) AddLink(link trace.Link) {
 		l.Attributes = l.Attributes[:limit]
 	}
 
-	s.mu.Lock()
 	s.links.add(l)
-	s.mu.Unlock()
 }
 
 // DroppedAttributes returns the number of attributes dropped by the span
@@ -755,12 +797,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan {
 }
 
 func (s *recordingSpan) addChild() {
-	if !s.IsRecording() {
+	if s == nil {
 		return
 	}
+
 	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
 	s.childSpanCount++
-	s.mu.Unlock()
 }
 
 func (*recordingSpan) private() {}
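Nearly every hunk in span.go is the same fix: methods used to call the exported IsRecording(), which takes and releases the lock, and then mutate state under a second lock acquisition, leaving a window in which End() could run between the check and the write. Each method now does a lock-free nil check, takes the lock once, and consults the unexported isRecording(), which assumes the lock is held. (Relatedly, dedupeAttrsFromRecord now takes its map by value; Go maps are reference types, so the pointer indirection bought nothing.) A condensed sketch of the new locking shape, with abbreviated names rather than the SDK's:

package main

import (
	"fmt"
	"sync"
	"time"
)

type span struct {
	mu      sync.Mutex
	endTime time.Time
	name    string
}

// isRecording assumes s.mu is held by the caller.
func (s *span) isRecording() bool {
	if s == nil {
		return false
	}
	return s.endTime.IsZero()
}

// SetName checks nil first (no lock needed), then holds the lock across
// both the recording check and the mutation, so the span cannot end
// between the check and the write.
func (s *span) SetName(name string) {
	if s == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.isRecording() {
		return
	}
	s.name = name
}

func main() {
	s := &span{}
	s.SetName("op")
	fmt.Println(s.name) // "op"
}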
vendor/go.opentelemetry.io/otel/sdk/version.go (2 lines changed, generated, vendored)

@@ -5,5 +5,5 @@
 
 // Version is the current release version of the OpenTelemetry SDK in use.
 func Version() string {
-	return "1.29.0"
+	return "1.32.0"
 }
vendor/modules.txt (10 lines changed, vendored)

@@ -974,7 +974,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw
 go.mongodb.org/mongo-driver/bson/bsontype
 go.mongodb.org/mongo-driver/bson/primitive
 go.mongodb.org/mongo-driver/x/bsonx/bsoncore
-# go.opentelemetry.io/otel v1.31.0 => go.opentelemetry.io/otel v1.29.0
+# go.opentelemetry.io/otel v1.32.0 => go.opentelemetry.io/otel v1.29.0
 ## explicit; go 1.21
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
@@ -1015,13 +1015,13 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
 # go.opentelemetry.io/otel/exporters/prometheus v0.51.0
 ## explicit; go 1.21
 go.opentelemetry.io/otel/exporters/prometheus
-# go.opentelemetry.io/otel/metric v1.31.0 => go.opentelemetry.io/otel/metric v1.29.0
+# go.opentelemetry.io/otel/metric v1.32.0 => go.opentelemetry.io/otel/metric v1.29.0
 ## explicit; go 1.21
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.29.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/sdk v1.32.0
+## explicit; go 1.22
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
 go.opentelemetry.io/otel/sdk/internal/env
@@ -1036,7 +1036,7 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate
 go.opentelemetry.io/otel/sdk/metric/internal/exemplar
 go.opentelemetry.io/otel/sdk/metric/internal/x
 go.opentelemetry.io/otel/sdk/metric/metricdata
-# go.opentelemetry.io/otel/trace v1.31.0 => go.opentelemetry.io/otel/trace v1.29.0
+# go.opentelemetry.io/otel/trace v1.32.0 => go.opentelemetry.io/otel/trace v1.29.0
 ## explicit; go 1.21
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
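The => arrows in vendor/modules.txt record replace directives: the sdk module itself now floats at v1.32.0, while otel, otel/metric, and otel/trace remain pinned to v1.29.0. The replace block is not part of the hunks above, but based on these modules.txt lines it presumably looks something like:

replace (
	go.opentelemetry.io/otel => go.opentelemetry.io/otel v1.29.0
	go.opentelemetry.io/otel/metric => go.opentelemetry.io/otel/metric v1.29.0
	go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v1.29.0
)

That pinning explains why go.sum still lists metric and trace at v1.29.0 even though go.mod now requires v1.32.0 of those modules.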