Update module github.com/openshift/imagebuilder to v1.2.6

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
pull/21740/head
renovate[bot] 2024-02-24 17:16:24 +00:00 committed by GitHub
parent ea3221a862
commit 6089769026
42 changed files with 1679 additions and 407 deletions

go.mod (13 changed lines)

@ -57,7 +57,7 @@ require (
github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
github.com/opencontainers/selinux v1.11.0
github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510
github.com/openshift/imagebuilder v1.2.6
github.com/rootless-containers/rootlesskit v1.1.1
github.com/shirou/gopsutil/v3 v3.24.1
github.com/sirupsen/logrus v1.9.3
@ -119,7 +119,7 @@ require (
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gin-gonic/gin v1.9.1 // indirect
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
@ -153,7 +153,6 @@ require (
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
@ -207,10 +206,10 @@ require (
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel v1.22.0 // indirect
go.opentelemetry.io/otel/metric v1.22.0 // indirect
go.opentelemetry.io/otel/sdk v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.22.0 // indirect
golang.org/x/arch v0.7.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/mod v0.14.0 // indirect
@ -219,7 +218,7 @@ require (
golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
google.golang.org/grpc v1.60.1 // indirect
google.golang.org/grpc v1.61.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect

go.sum (29 changed lines)

@ -165,8 +165,8 @@ github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SU
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@ -363,7 +363,6 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@ -460,8 +459,8 @@ github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510 h1:ILAESc7vHTVNKctTiR10XC+vACPlR4NbS6570G6QQmY=
github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510/go.mod h1:nOaQJMj7VZgdqATqES4GxZX/p6gwK2r7bpE3Ry63+jM=
github.com/openshift/imagebuilder v1.2.6 h1:ge+HILDVaB3c65KhH0nrM/Z1f9EdN8NUqxigd4qGqqo=
github.com/openshift/imagebuilder v1.2.6/go.mod h1:6VbTJ5CK7+OOTWcQlc/Cp86ML7pKlxOwCJNESQPbtgw=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
@ -472,7 +471,6 @@ github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOS
github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@ -497,7 +495,6 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rootless-containers/rootlesskit v1.1.1 h1:F5psKWoWY9/VjZ3ifVcaosjvFZJOagX85U22M0/EQZE=
github.com/rootless-containers/rootlesskit v1.1.1/go.mod h1:UD5GoA3dqKCJrnvnhVgQQnweMF2qZnf9KLw8EewcMZI=
@ -619,16 +616,16 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
@ -783,7 +780,7 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -791,8 +788,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0=
google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

vendor/github.com/go-logr/logr/README.md

@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
| Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
package.
alternative API, with [interoperability](#slog-interoperability) provided by
some conversion functions.
### Inspiration
@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
## slog interoperability
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API. `slogr` itself leaves that to the caller.
slog API.
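A minimal round-trip sketch of the conversion functions referenced above, assuming Go 1.21+ and the logr v1.4.1 API pulled in by this bump (`FromSlogHandler`/`ToSlogHandler` replace the former `slogr` package):

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// slog.Handler -> logr.Logger
	handler := slog.NewJSONHandler(os.Stdout, nil)
	logrLogger := logr.FromSlogHandler(handler)
	logrLogger.Info("hello from logr", "user", "alice")

	// logr.Logger -> slog.Handler, wrapped back into the high-level slog API
	slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
	slogLogger.Info("hello from slog", "user", "bob")
}
```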
## Using a `logr.Sink` as backend for slog
### Using a `logr.LogSink` as backend for slog
Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
implementing both the normal logr interface(s) and `SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.Sink in the same
type](https://github.com/golang/go/issues/59110).
If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls.
Such an implementation should also support values that implement specific
@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend.
## Using a `slog.Handler` as backend for logr
### Using a `slog.Handler` as backend for logr
Using a plain `slog.Handler` without support for logr works better than the
other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
by negating them.
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
- Stack unwinding is done by the `SlogSink` and the resulting program
counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional
attribute with `logger` as key and the names separated by slash as value.
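The verbosity mapping and `WithName` handling described in the bullets above can be seen in a short sketch (assumes Go 1.21+; the handler level is lowered so that `V(4)` output is visible):

```go
package main

import (
	"errors"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	h := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})
	log := logr.FromSlogHandler(h).WithName("controller")

	log.Info("starting")                    // emitted at slog level 0 (Info)
	log.V(4).Info("verbose detail")         // emitted at slog level -4 (Debug)
	log.Error(errors.New("boom"), "failed") // emitted at slog.LevelError, error under "err"
	// every record also carries logger="controller" from WithName
}
```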
@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
with logr implementations without slog support is not important, then
`slog.Valuer` is sufficient.
## Context support for slog
### Context support for slog
Storing a logger in a `context.Context` is not supported by
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:
slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
used to fill this gap. They store and retrieve a `slog.Logger` pointer
under the same context key that is also used by `NewContext` and
`FromContext` for `logr.Logger` value.
func HandlerFromContext(ctx context.Context) slog.Handler {
logger, err := logr.FromContext(ctx)
if err == nil {
return slogr.NewSlogHandler(logger)
}
return slog.Default().Handler()
}
When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
automatically convert the `slog.Logger` to a
`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
return logr.NewContext(ctx, slogr.NewLogr(handler))
}
With this approach, binaries which use either slog or logr are as efficient as
possible with no unnecessary allocations. This is also why the API stores a
`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
on retrieval would need to allocate one.
The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.
The downside is that switching back and forth needs more allocations. Because
logr is the API that is already in use by different packages, in particular
Kubernetes, the recommendation is to use the `logr.Logger` API in code which
uses contextual logging.
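A small sketch of the context round-trip described above (assumes Go 1.21+):

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	ctx := logr.NewContextWithSlogLogger(context.Background(),
		slog.New(slog.NewTextHandler(os.Stdout, nil)))

	// FromContext converts the stored *slog.Logger into a logr.Logger.
	if log, err := logr.FromContext(ctx); err == nil {
		log.Info("retrieved as logr.Logger")
	}

	// FromContextAsSlogLogger goes the other direction.
	if sl := logr.FromContextAsSlogLogger(ctx); sl != nil {
		sl.Info("retrieved as *slog.Logger")
	}
}
```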
An alternative to adding values to a logger and storing that logger in the
context is to store the values in the context and to configure a logging
backend to extract those values when emitting log entries. This only works when
log calls are passed the context, which is not supported by the logr API.
With the slog API, it is possible, but not
required. https://github.com/veqryn/slog-context is a package for slog which
provides additional support code for this approach. It also contains wrappers
for the context functions in logr, so developers who prefer to not use the logr
APIs directly can use those instead and the resulting code will still be
interoperable with logr.
## FAQ

vendor/github.com/go-logr/logr/context.go (new vendored file, 33 lines)

@ -0,0 +1,33 @@
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
// the value is always a Logger value. With Go >= 1.21, the value can be a
// Logger value or a slog.Logger pointer.
type contextKey struct{}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}

vendor/github.com/go-logr/logr/context_noslog.go (new vendored file, 49 lines)

@ -0,0 +1,49 @@
//go:build !go1.21
// +build !go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}

vendor/github.com/go-logr/logr/context_slog.go (new vendored file, 83 lines)

@ -0,0 +1,83 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"fmt"
"log/slog"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
v := ctx.Value(contextKey{})
if v == nil {
return Logger{}, notFoundError{}
}
switch v := v.(type) {
case Logger:
return v, nil
case *slog.Logger:
return FromSlogHandler(v.Handler()), nil
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
v := ctx.Value(contextKey{})
if v == nil {
return nil
}
switch v := v.(type) {
case Logger:
return slog.New(ToSlogHandler(v))
case *slog.Logger:
return v
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if logger, err := FromContext(ctx); err == nil {
return logger
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
// provided slog.Logger.
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}

vendor/github.com/go-logr/logr/funcr/funcr.go

@ -100,6 +100,11 @@ type Options struct {
// details, see docs for Go's time.Layout.
TimestampFormat string
// LogInfoLevel tells funcr what key to use to log the info level.
// If not specified, the info level will be logged as "level".
// If this is set to "", the info level will not be logged at all.
LogInfoLevel *string
// Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded.
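A sketch of the new `LogInfoLevel` option; the `"severity"` key here is an arbitrary example, leaving the field nil keeps the default `"level"` key, and setting it to `""` suppresses the level entirely:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	key := "severity"
	log := funcr.New(func(_, args string) { fmt.Println(args) },
		funcr.Options{LogInfoLevel: &key})

	log.Info("hello", "user", "alice")
	// "severity"=0 "msg"="hello" "user"="alice"
}
```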
@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth
}
if opts.LogInfoLevel == nil {
opts.LogInfoLevel = new(string)
*opts.LogInfoLevel = "level"
}
f := Formatter{
outputFormat: outfmt,
prefix: "",
@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
outputFormat outputFormat
prefix string
values []any
valuesStr string
depth int
opts *Options
outputFormat outputFormat
prefix string
values []any
valuesStr string
parentValuesStr string
depth int
opts *Options
group string // for slog groups
groupDepth int
}
// outputFormat indicates which outputFormat to use.
@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
buf.WriteByte('{')
buf.WriteByte('{') // for the whole line
}
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
if len(f.valuesStr) > 0 {
if f.parentValuesStr != "" {
if continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
buf.WriteByte(' ')
}
buf.WriteByte(f.comma())
}
buf.WriteString(f.parentValuesStr)
continuing = true
buf.WriteString(f.valuesStr)
}
groupDepth := f.groupDepth
if f.group != "" {
if f.valuesStr != "" || len(args) != 0 {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
} else {
// The group was empty
groupDepth--
}
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
continuing = true
}
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
if f.outputFormat == outputJSON {
buf.WriteByte('}')
for i := 0; i < groupDepth; i++ {
buf.WriteByte('}') // for the groups
}
if f.outputFormat == outputJSON {
buf.WriteByte('}') // for the whole line
}
return buf.String()
}
@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
copied := false
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
if !copied {
newList := make([]any, len(kvList))
copy(newList, kvList)
kvList = newList
copied = true
}
k = f.nonStringKey(kvList[i])
kvList[i] = k
}
@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if i > 0 || continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
buf.WriteByte(f.comma())
} else {
// In theory the format could be something we don't understand. In
// practice, we control it, so it won't be.
@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
}
}
if escapeKeys {
buf.WriteString(prettyString(k))
} else {
// this is faster
buf.WriteByte('"')
buf.WriteString(k)
buf.WriteByte('"')
}
if f.outputFormat == outputJSON {
buf.WriteByte(':')
} else {
buf.WriteByte('=')
}
buf.WriteString(f.quoted(k, escapeKeys))
buf.WriteByte(f.colon())
buf.WriteString(f.pretty(v))
}
return kvList
}
func (f Formatter) quoted(str string, escape bool) string {
if escape {
return prettyString(str)
}
// this is faster
return `"` + str + `"`
}
func (f Formatter) comma() byte {
if f.outputFormat == outputJSON {
return ','
}
return ' '
}
func (f Formatter) colon() byte {
if f.outputFormat == outputJSON {
return ':'
}
return '='
}
func (f Formatter) pretty(value any) string {
return f.prettyWithFlags(value, 0, 0)
}
@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
}
for i := 0; i < len(v); i += 2 {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
k, _ := v[i].(string) // sanitize() above means no need to check success
// arbitrary keys might need escaping
buf.WriteString(prettyString(k))
buf.WriteByte(':')
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
}
if flags&flagRawStruct == 0 {
@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
continue
}
if printComma {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
printComma = true // if we got here, we are rendering a field
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
name = fld.Name
}
// field names can't contain characters which need escaping
buf.WriteByte('"')
buf.WriteString(name)
buf.WriteByte('"')
buf.WriteByte(':')
buf.WriteString(f.quoted(name, false))
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
}
if flags&flagRawStruct == 0 {
@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
i := 0
for it.Next() {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
// If a map key supports TextMarshaler, use it.
keystr := ""
@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
}
}
buf.WriteString(keystr)
buf.WriteByte(':')
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++
}
@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any {
return kvList
}
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
func (f *Formatter) startGroup(group string) {
// Unnamed groups are just inlined.
if group == "" {
return
}
// Any saved values can no longer be changed.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
continuing := false
if f.parentValuesStr != "" {
buf.WriteString(f.parentValuesStr)
continuing = true
}
if f.group != "" && f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
}
// NOTE: We don't close the scope here - that's done later, when a log line
// is actually rendered (because we have N scopes to close).
f.parentValuesStr = buf.String()
// Start collecting new values.
f.group = group
f.groupDepth++
f.valuesStr = ""
f.values = nil
}
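How the new group handling surfaces in practice: when funcr is used as a slog backend (Go 1.21+), `WithGroup` attributes render as a nested object. A rough sketch:

```go
package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	log := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
	sl := slog.New(logr.ToSlogHandler(log))

	sl.WithGroup("req").Info("done", "method", "GET", "status", 200)
	// the group renders as a nested object: ...,"req":{"method":"GET","status":200}}
}
```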
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller())
}
args = append(args, "level", level, "msg", msg)
if key := *f.opts.LogInfoLevel; key != "" {
args = append(args, key, level)
}
args = append(args, "msg", msg)
return prefix, f.render(args, kvList)
}

vendor/github.com/go-logr/logr/funcr/slogsink.go (new vendored file, 105 lines)

@ -0,0 +1,105 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package funcr
import (
"context"
"log/slog"
"github.com/go-logr/logr"
)
var _ logr.SlogSink = &fnlogger{}
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, kvList)
return true
})
if record.Level >= slog.LevelError {
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
}
return nil
}
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, kvList)
}
l.AddValues(kvList)
return &l
}
func (l fnlogger) WithGroup(name string) logr.SlogSink {
l.startGroup(name)
return &l
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, grpKVs)
}
if attr.Key == "" {
// slog says we have to inline these
kvList = append(kvList, grpKVs...)
} else {
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
}
} else if attr.Key != "" {
kvList = append(kvList, attr.Key, attrVal.Any())
}
return kvList
}
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l fnlogger) levelFromSlog(level slog.Level) int {
result := -level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}

vendor/github.com/go-logr/logr/logr.go

@ -207,10 +207,6 @@ limitations under the License.
// those.
package logr
import (
"context"
)
// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users. Passing a nil sink will create
// a Logger which discards all log lines.
@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
return l.sink == nil
}
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {

vendor/github.com/go-logr/logr/sloghandler.go (new vendored file, 192 lines)

@ -0,0 +1,192 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
type slogHandler struct {
// May be nil, in which case all logs get discarded.
sink LogSink
// Non-nil if sink is non-nil and implements SlogSink.
slogSink SlogSink
// groupPrefix collects values from WithGroup calls. It gets added as
// prefix to value keys when handling a log record.
groupPrefix string
// levelBias can be set when constructing the handler to influence the
// slog.Level of log records. A positive levelBias reduces the
// slog.Level value. slog has no API to influence this value after the
// handler got created, so it can only be set indirectly through
// Logger.V.
levelBias slog.Level
}
var _ slog.Handler = &slogHandler{}
// groupSeparator is used to concatenate WithGroup names and attribute keys.
const groupSeparator = "."
// GetLevel is used for black box unit testing.
func (l *slogHandler) GetLevel() slog.Level {
return l.levelBias
}
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
}
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
if l.slogSink != nil {
// Only adjust verbosity level of log entries < slog.LevelError.
if record.Level < slog.LevelError {
record.Level -= l.levelBias
}
return l.slogSink.Handle(ctx, record)
}
// No need to check for nil sink here because Handle will only be called
// when Enabled returned true.
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
return true
})
if record.Level >= slog.LevelError {
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
}
return nil
}
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
// are called by Handle, code in slog gets skipped.
//
// This offset currently (Go 1.21.0) works for calls through
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
// chain won't change. Wrapping the handler will also break unwinding. It's
// still better than not adjusting at all....
//
// This cannot be done when constructing the handler because FromSlogHandler needs
// access to the original sink without this adjustment. A second copy would
// work, but then WithAttrs would have to be called for both of them.
func (l *slogHandler) sinkWithCallDepth() LogSink {
if sink, ok := l.sink.(CallDepthLogSink); ok {
return sink.WithCallDepth(2)
}
return l.sink
}
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
if l.sink == nil || len(attrs) == 0 {
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithAttrs(attrs)
clone.sink = clone.slogSink
} else {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
}
clone.sink = l.sink.WithValues(kvList...)
}
return &clone
}
func (l *slogHandler) WithGroup(name string) slog.Handler {
if l.sink == nil {
return l
}
if name == "" {
// slog says to inline empty groups
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithGroup(name)
clone.sink = clone.slogSink
} else {
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
}
return &clone
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
prefix := groupPrefix
if attr.Key != "" {
prefix = addPrefix(groupPrefix, attr.Key)
}
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, prefix, grpKVs)
}
kvList = append(kvList, grpKVs...)
} else if attr.Key != "" {
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
}
return kvList
}
func addPrefix(prefix, name string) string {
if prefix == "" {
return name
}
if name == "" {
return prefix
}
return prefix + groupSeparator + name
}
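With a sink that does not implement `SlogSink` (stdr, already in this module's dependency set, is one example), the handler flattens group names into dot-prefixed keys as described above. A sketch, assuming Go 1.21+:

```go
package main

import (
	stdlog "log"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

func main() {
	lgr := stdr.New(stdlog.New(os.Stderr, "", stdlog.LstdFlags))
	sl := slog.New(logr.ToSlogHandler(lgr))

	sl.WithGroup("req").Info("done", "method", "GET")
	// the attribute reaches the logr sink with the key "req.method"
}
```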
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l *slogHandler) levelFromSlog(level slog.Level) int {
result := -level
result += l.levelBias // in case the original Logger had a V level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}

vendor/github.com/go-logr/logr/slogr.go (new vendored file, 100 lines)

@ -0,0 +1,100 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
// FromSlogHandler returns a Logger which writes to the slog.Handler.
//
// The logr verbosity level is mapped to slog levels such that V(0) becomes
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
func FromSlogHandler(handler slog.Handler) Logger {
if handler, ok := handler.(*slogHandler); ok {
if handler.sink == nil {
return Discard()
}
return New(handler.sink).V(int(handler.levelBias))
}
return New(&slogSink{handler: handler})
}
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
//
// The returned handler writes all records with level >= slog.LevelError as
// error log entries with LogSink.Error, regardless of the verbosity level of
// the Logger:
//
// logger := <some Logger with 0 as verbosity level>
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
//
// The level of all other records gets reduced by the verbosity
// level of the Logger and the result is negated. If it happens
// to be negative, then it gets replaced by zero because a LogSink
// is not expected to handle negative levels:
//
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
func ToSlogHandler(logger Logger) slog.Handler {
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
return sink.handler
}
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
if slogSink, ok := handler.sink.(SlogSink); ok {
handler.slogSink = slogSink
}
return handler
}
// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better. It then should
// also support special slog values like slog.Group. When used as a
// slog.Handler, the advantages are:
//
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
// as intended by slog
// - proper grouping of key/value pairs via WithGroup
// - verbosity levels > slog.LevelInfo can be recorded
// - less overhead
//
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
// well. Developers can pick whatever API suits them better and/or mix
// packages which use either API in the same binary with a common logging
// implementation.
//
// This interface is necessary because the type implementing the LogSink
// interface cannot also implement the slog.Handler interface due to the
// different prototype of the common Enabled method.
//
// An implementation could support both interfaces in two different types, but then
// additional interfaces would be needed to convert between those types in FromSlogHandler
// and ToSlogHandler.
type SlogSink interface {
LogSink
Handle(ctx context.Context, record slog.Record) error
WithAttrs(attrs []slog.Attr) SlogSink
WithGroup(name string) SlogSink
}
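A sink advertises this capability simply by implementing the interface; callers (and `ToSlogHandler` itself) detect it with a type assertion. A small illustrative helper, not part of the logr API:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
)

// describeSink reports whether a logger's sink can accept slog records
// natively. Illustrative helper only.
func describeSink(logger logr.Logger) string {
	if _, ok := logger.GetSink().(logr.SlogSink); ok {
		return "slog-native sink: groups and PCs pass through unchanged"
	}
	return "plain LogSink: attributes are converted to key/value pairs"
}

func main() {
	fmt.Println(describeSink(logr.Discard()))
}
```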

vendor/github.com/go-logr/logr/slogsink.go (new vendored file, 120 lines)

@ -0,0 +1,120 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
"runtime"
"time"
)
var (
_ LogSink = &slogSink{}
_ CallDepthLogSink = &slogSink{}
_ Underlier = &slogSink{}
)
// Underlier is implemented by the LogSink returned by FromSlogHandler.
type Underlier interface {
// GetUnderlying returns the Handler used by the LogSink.
GetUnderlying() slog.Handler
}
const (
// nameKey is used to log the `WithName` values as an additional attribute.
nameKey = "logger"
// errKey is used to log the error parameter of Error as an additional attribute.
errKey = "err"
)
type slogSink struct {
callDepth int
name string
handler slog.Handler
}
func (l *slogSink) Init(info RuntimeInfo) {
l.callDepth = info.CallDepth
}
func (l *slogSink) GetUnderlying() slog.Handler {
return l.handler
}
func (l *slogSink) WithCallDepth(depth int) LogSink {
newLogger := *l
newLogger.callDepth += depth
return &newLogger
}
func (l *slogSink) Enabled(level int) bool {
return l.handler.Enabled(context.Background(), slog.Level(-level))
}
func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
l.log(nil, msg, slog.Level(-level), kvList...)
}
func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
l.log(err, msg, slog.LevelError, kvList...)
}
func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
var pcs [1]uintptr
// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
runtime.Callers(3+l.callDepth, pcs[:])
record := slog.NewRecord(time.Now(), level, msg, pcs[0])
if l.name != "" {
record.AddAttrs(slog.String(nameKey, l.name))
}
if err != nil {
record.AddAttrs(slog.Any(errKey, err))
}
record.Add(kvList...)
_ = l.handler.Handle(context.Background(), record)
}
func (l slogSink) WithName(name string) LogSink {
if l.name != "" {
l.name += "/"
}
l.name += name
return &l
}
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
return &l
}
func kvListToAttrs(kvList ...interface{}) []slog.Attr {
// We don't need the record itself, only its Add method.
record := slog.NewRecord(time.Time{}, 0, "", 0)
record.Add(kvList...)
attrs := make([]slog.Attr, 0, record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
attrs = append(attrs, attr)
return true
})
return attrs
}

vendor/github.com/openshift/imagebuilder/.travis.yml

@ -6,7 +6,6 @@ services:
- docker
go:
- "1.19"
- "1.20"
before_install:

View File

@ -198,6 +198,21 @@ func (stages Stages) ByName(name string) (Stage, bool) {
return stage, true
}
}
if i, err := strconv.Atoi(name); err == nil {
return stages.ByPosition(i)
}
return Stage{}, false
}
func (stages Stages) ByPosition(position int) (Stage, bool) {
for _, stage := range stages {
// stage.Position is expected to be the same as the unnamed
// index variable for this loop, but comparing to the Position
// field's value is easier to explain
if stage.Position == position {
return stage, true
}
}
return Stage{}, false
}
@ -211,6 +226,16 @@ func (stages Stages) ByTarget(target string) (Stages, bool) {
return stages[i : i+1], true
}
}
if position, err := strconv.Atoi(target); err == nil {
for i, stage := range stages {
// stage.Position is expected to be the same as the unnamed
// index variable for this loop, but comparing to the Position
// field's value is easier to explain
if stage.Position == position {
return stages[i : i+1], true
}
}
}
return nil, false
}
@ -224,6 +249,16 @@ func (stages Stages) ThroughTarget(target string) (Stages, bool) {
return stages[0 : i+1], true
}
}
if position, err := strconv.Atoi(target); err == nil {
for i, stage := range stages {
// stage.Position is expected to be the same as the unnamed
// index variable for this loop, but comparing to the Position
// field's value is easier to explain
if stage.Position == position {
return stages[0 : i+1], true
}
}
}
return nil, false
}
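A hypothetical sketch of the numeric stage lookup added in imagebuilder v1.2.6. The `Stages` literal is hand-built for brevity and the stage names are invented; real code would obtain the stages from `NewStages` after parsing a Dockerfile:

```go
package main

import (
	"fmt"

	"github.com/openshift/imagebuilder"
)

func main() {
	stages := imagebuilder.Stages{
		{Position: 0, Name: "0"},
		{Position: 1, Name: "builder"},
	}

	if s, ok := stages.ByName("builder"); ok {
		fmt.Println("by name:", s.Position) // 1
	}
	// "1" matches no stage name above, so ByName now falls back to ByPosition.
	if s, ok := stages.ByName("1"); ok {
		fmt.Println("by position:", s.Name) // builder
	}
}
```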

vendor/github.com/openshift/imagebuilder/imagebuilder.spec

@ -12,7 +12,7 @@
#
%global golang_version 1.8.1
%{!?version: %global version 1.2.6-dev}
%{!?version: %global version 1.2.6}
%{!?release: %global release 1}
%global package_name imagebuilder
%global product_name Container Image Builder

vendor/go.opentelemetry.io/otel/CHANGELOG.md

@ -8,6 +8,46 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
## [1.22.0/0.45.0] 2024-01-17
### Added
- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
- Experimental cardinality limiting is added to the metric SDK.
See [metric documentation](./sdk/metric/EXPERIMENTAL.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
### Changed
- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.19.0` version of the OpenTelemetry specification. (#4754)
- Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`.
If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g `if ctx.Err() != nil`). (#4671)
- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
### Fixed
- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that requires escaping during propagation. (#4804)
- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
## [1.21.0/0.44.0] 2023-11-16
### Removed
@ -2735,7 +2775,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.22.0...HEAD
[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0

vendor/go.opentelemetry.io/otel/CONTRIBUTING.md

@ -591,6 +591,26 @@ this.
[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
### Ignoring context cancellation
OpenTelemetry API implementations need to ignore the cancellation of the contexts that are
passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
Recording methods should not return an error describing the cancellation state of the context
when they complete, nor should they abort any work.
This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
the method. In that case the context cancellation can be used for the timeout with the
restriction that this behavior is documented for the method. Otherwise, timeouts
are expected to be handled by the user calling the API, not the implementation.
Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
of a provider. It is assumed the context passed from a user is not used for this purpose.
Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
force flushing telemetry, shutting down a signal provider) the context cancellation
should be honored. This means all work done on behalf of the user provided context
should be canceled.
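A short sketch of the rule: recording telemetry with an already-canceled context must neither fail nor abort (with no SDK configured this uses the global no-op tracer, which keeps the example self-contained):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // ctx is already canceled

	// Starting and ending a span still works; no error is surfaced.
	_, span := otel.Tracer("example").Start(ctx, "operation")
	span.End()
}
```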
## Approvers and Maintainers
### Approvers
@ -610,6 +630,7 @@ this.
### Emeritus
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Josh MacDonald](https://github.com/jmacd), LightStep

vendor/go.opentelemetry.io/otel/README.md

@ -66,7 +66,7 @@ are made for those systems currently.
## Getting Started
You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
OpenTelemetry's goal is to provide a single set of APIs to capture distributed
traces and metrics from your application and send them to an observability

vendor/go.opentelemetry.io/otel/RELEASING.md

@ -123,12 +123,12 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
### Website Documentation
Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
### Demo Repository

vendor/go.opentelemetry.io/otel/attribute/set.go

@ -279,52 +279,75 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S
position--
kvs[offset], kvs[position] = kvs[position], kvs[offset]
}
kvs = kvs[position:]
if filter != nil {
return filterSet(kvs[position:], filter)
}
return Set{
equivalent: computeDistinct(kvs[position:]),
}, nil
}
// filterSet reorders kvs so that included keys are contiguous at the end of
// the slice, while excluded keys precede the included keys.
func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
var excluded []KeyValue
// Move attributes that do not match the filter so they're adjacent before
// calling computeDistinct().
distinctPosition := len(kvs)
// Swap indistinct keys forward and distinct keys toward the
// end of the slice.
offset := len(kvs) - 1
for ; offset >= 0; offset-- {
if filter(kvs[offset]) {
distinctPosition--
kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
continue
if div := filteredToFront(kvs, filter); div != 0 {
return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
}
}
excluded = kvs[:distinctPosition]
return Set{equivalent: computeDistinct(kvs)}, nil
}
return Set{
equivalent: computeDistinct(kvs[distinctPosition:]),
}, excluded
// filteredToFront filters slice in place using the keep function. All KeyValues
// that need to be removed are moved to the front. All KeyValues that need to be
// kept are moved (in order) to the back. The index of the first KeyValue to be
// kept is returned.
func filteredToFront(slice []KeyValue, keep Filter) int {
n := len(slice)
j := n
for i := n - 1; i >= 0; i-- {
if keep(slice[i]) {
j--
slice[i], slice[j] = slice[j], slice[i]
}
}
return j
}
// Filter returns a filtered copy of this Set. See the documentation for
// NewSetWithSortableFiltered for more details.
func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if re == nil {
return Set{
equivalent: l.equivalent,
}, nil
return *l, nil
}
// Note: This could be refactored to avoid the temporary slice
// allocation, if it proves to be expensive.
return filterSet(l.ToSlice(), re)
// Iterate in reverse to the first attribute that will be filtered out.
n := l.Len()
first := n - 1
for ; first >= 0; first-- {
kv, _ := l.Get(first)
if !re(kv) {
break
}
}
// No attributes will be dropped, return the immutable Set l and nil.
if first < 0 {
return *l, nil
}
// Copy now that we know we need to return a modified set.
//
// Do not do this in-place on the underlying storage of *Set l. Sets are
// immutable and filtering should not change this.
slice := l.ToSlice()
// Don't re-iterate the slice if only slice[0] is filtered.
if first == 0 {
// It is safe to assume len(slice) >= 1 given we found at least one
// attribute above that needs to be filtered out.
return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
}
// Move the filtered slice[first] to the front (preserving order).
kv := slice[first]
copy(slice[1:first+1], slice[:first])
slice[0] = kv
// Do not re-evaluate re(slice[first+1:]).
div := filteredToFront(slice[1:first+1], re) + 1
return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
}
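As an illustration of the rewritten filtering path (a consumer-side sketch, not part of this file): Filter leaves the receiver untouched and returns the kept Set together with the dropped KeyValues.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	s := attribute.NewSet(attribute.String("a", "1"), attribute.Int("b", 2))

	// Keep every attribute except key "b".
	kept, dropped := s.Filter(func(kv attribute.KeyValue) bool {
		return kv.Key != "b"
	})

	fmt.Println(kept.Encoded(attribute.DefaultEncoder())) // a=1
	fmt.Println(len(dropped))                             // 1
}
```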
// computeDistinct returns a Distinct using either the fixed- or

View File

@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"go.opentelemetry.io/otel/internal/baggage"
@ -32,16 +31,6 @@ const (
listDelimiter = ","
keyValueDelimiter = "="
propertyDelimiter = ";"
keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
)
var (
keyRe = regexp.MustCompile(`^` + keyDef + `$`)
valueRe = regexp.MustCompile(`^` + valueDef + `$`)
propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
)
var (
@ -67,7 +56,7 @@ type Property struct {
//
// If key is invalid, an error will be returned.
func NewKeyProperty(key string) (Property, error) {
if !keyRe.MatchString(key) {
if !validateKey(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
@ -77,14 +66,29 @@ func NewKeyProperty(key string) (Property, error) {
// NewKeyValueProperty returns a new Property for key with value.
//
// If key or value are invalid, an error will be returned.
// The passed key must be compliant with the W3C Baggage specification.
// The passed value must be percent-encoded as defined in the W3C Baggage specification.
//
// Notice: Consider using [NewKeyValuePropertyRaw] instead,
// which does not require percent-encoding of the value.
func NewKeyValueProperty(key, value string) (Property, error) {
if !keyRe.MatchString(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
if !validateValue(value) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
decodedValue, err := url.PathUnescape(value)
if err != nil {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
return NewKeyValuePropertyRaw(key, decodedValue)
}
// NewKeyValuePropertyRaw returns a new Property for key with value.
//
// The passed key must be compliant with the W3C Baggage specification.
func NewKeyValuePropertyRaw(key, value string) (Property, error) {
if !validateKey(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
p := Property{
key: key,
@ -106,20 +110,11 @@ func parseProperty(property string) (Property, error) {
return newInvalidProperty(), nil
}
match := propertyRe.FindStringSubmatch(property)
if len(match) != 4 {
p, ok := parsePropertyInternal(property)
if !ok {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
}
var p Property
if match[1] != "" {
p.key = match[1]
} else {
p.key = match[2]
p.value = match[3]
p.hasValue = true
}
return p, nil
}
@ -130,12 +125,9 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err)
}
if !keyRe.MatchString(p.key) {
if !validateKey(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
if p.hasValue && !valueRe.MatchString(p.value) {
return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
}
if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value"))
}
@ -154,11 +146,11 @@ func (p Property) Value() (string, bool) {
return p.value, p.hasValue
}
// String encodes Property into a string compliant with the W3C Baggage
// String encodes Property into a header string compliant with the W3C Baggage
// specification.
func (p Property) String() string {
if p.hasValue {
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
}
return p.key
}
@ -218,7 +210,7 @@ func (p properties) validate() error {
return nil
}
// String encodes properties into a string compliant with the W3C Baggage
// String encodes properties into a header string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
props := make([]string, len(p))
@ -240,11 +232,28 @@ type Member struct {
hasData bool
}
// NewMember returns a new Member from the passed arguments. The key will be
// used directly while the value will be url decoded after validation. An error
// is returned if the created Member would be invalid according to the W3C
// Baggage specification.
// NewMember returns a new Member from the passed arguments.
//
// The passed key must be compliant with the W3C Baggage specification.
// The passed value must be percent-encoded as defined in the W3C Baggage specification.
//
// Notice: Consider using [NewMemberRaw] instead,
// which does not require percent-encoding of the value.
func NewMember(key, value string, props ...Property) (Member, error) {
if !validateValue(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
decodedValue, err := url.PathUnescape(value)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
return NewMemberRaw(key, decodedValue, props...)
}
// NewMemberRaw returns a new Member from the passed arguments.
//
// The passed key must be compliant with the W3C Baggage specification.
func NewMemberRaw(key, value string, props ...Property) (Member, error) {
m := Member{
key: key,
value: value,
@ -254,11 +263,6 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
decodedValue, err := url.PathUnescape(value)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
m.value = decodedValue
return m, nil
}
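An illustrative consumer-side sketch (not part of this file) of the difference between the two constructors: NewMember expects a percent-encoded value and decodes it, while NewMemberRaw takes the raw value directly and encodes it on serialization.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Raw UTF-8 value; percent-encoding is applied when serialized.
	raw, _ := baggage.NewMemberRaw("userID", "hello world")

	// Pre-encoded value; it is decoded after validation.
	enc, _ := baggage.NewMember("userID", "hello%20world")

	fmt.Println(raw.Value() == enc.Value()) // true: both hold "hello world"
	fmt.Println(raw.String())               // userID=hello%20world
}
```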
@ -274,11 +278,7 @@ func parseMember(member string) (Member, error) {
return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
}
var (
key, value string
props properties
)
var props properties
keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found {
// Parse the member properties.
@ -299,36 +299,34 @@ func parseMember(member string) (Member, error) {
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
key = strings.TrimSpace(k)
var err error
value, err = url.PathUnescape(strings.TrimSpace(v))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
if !keyRe.MatchString(key) {
key := strings.TrimSpace(k)
if !validateKey(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
val := strings.TrimSpace(v)
if !validateValue(val) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
}
// Decode a percent-encoded value.
value, err := url.PathUnescape(val)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
}
return Member{key: key, value: value, properties: props, hasData: true}, nil
}
// validate ensures m conforms to the W3C Baggage specification.
// A key is just an ASCII string, but a value must be URL encoded UTF-8,
// returning an error otherwise.
// A key must be an ASCII string, returning an error otherwise.
func (m Member) validate() error {
if !m.hasData {
return fmt.Errorf("%w: %q", errInvalidMember, m)
}
if !keyRe.MatchString(m.key) {
if !validateKey(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
if !valueRe.MatchString(m.value) {
return fmt.Errorf("%w: %q", errInvalidValue, m.value)
}
return m.properties.validate()
}
@ -341,11 +339,13 @@ func (m Member) Value() string { return m.value }
// Properties returns a copy of the Member properties.
func (m Member) Properties() []Property { return m.properties.Copy() }
// String encodes Member into a string compliant with the W3C Baggage
// String encodes Member into a header string compliant with the W3C Baggage
// specification.
func (m Member) String() string {
// A key is just an ASCII string, but a value is URL encoded UTF-8.
s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
// A key is just an ASCII string. A value is restricted to be
// US-ASCII characters excluding CTLs, whitespace,
// DQUOTE, comma, semicolon, and backslash.
s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
if len(m.properties) > 0 {
s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
}
@ -536,9 +536,8 @@ func (b Baggage) Len() int {
return len(b.list)
}
// String encodes Baggage into a string compliant with the W3C Baggage
// specification. The returned string will be invalid if the Baggage contains
// any invalid list-members.
// String encodes Baggage into a header string compliant with the W3C Baggage
// specification.
func (b Baggage) String() string {
members := make([]string, 0, len(b.list))
for k, v := range b.list {
@ -550,3 +549,196 @@ func (b Baggage) String() string {
}
return strings.Join(members, listDelimiter)
}
// parsePropertyInternal attempts to decode a Property from the passed string.
// It follows the spec at https://www.w3.org/TR/baggage/#definition.
func parsePropertyInternal(s string) (p Property, ok bool) {
// For the entire function we will use " key = value " as an example.
// Attempting to parse the key.
// First skip spaces at the beginning "< >key = value " (they could be empty).
index := skipSpace(s, 0)
// Parse the key: " <key> = value ".
keyStart := index
keyEnd := index
for _, c := range s[keyStart:] {
if !validateKeyChar(c) {
break
}
keyEnd++
}
// If we couldn't find any valid key character,
// it means the key is either empty or invalid.
if keyStart == keyEnd {
return
}
// Skip spaces after the key: " key< >= value ".
index = skipSpace(s, keyEnd)
if index == len(s) {
// A key can have no value, like: " key ".
ok = true
p.key = s[keyStart:keyEnd]
return
}
// If we have not reached the end and we can't find the '=' delimiter,
// it means the property is invalid.
if s[index] != keyValueDelimiter[0] {
return
}
// Attempting to parse the value.
// Match: " key =< >value ".
index = skipSpace(s, index+1)
// Match the value string: " key = <value> ".
// A valid property can be: " key =".
// Therefore, we don't have to check if the value is empty.
valueStart := index
valueEnd := index
for _, c := range s[valueStart:] {
if !validateValueChar(c) {
break
}
valueEnd++
}
// Skip all trailing whitespaces: " key = value< >".
index = skipSpace(s, valueEnd)
// If after looking for the value and skipping whitespaces
// we have not reached the end, it means the property is
// invalid, something like: " key = value value1".
if index != len(s) {
return
}
// Decode a percent-encoded value.
value, err := url.PathUnescape(s[valueStart:valueEnd])
if err != nil {
return
}
ok = true
p.key = s[keyStart:keyEnd]
p.hasValue = true
p.value = value
return
}
func skipSpace(s string, offset int) int {
i := offset
for ; i < len(s); i++ {
c := s[i]
if c != ' ' && c != '\t' {
break
}
}
return i
}
func validateKey(s string) bool {
if len(s) == 0 {
return false
}
for _, c := range s {
if !validateKeyChar(c) {
return false
}
}
return true
}
func validateKeyChar(c int32) bool {
return (c >= 0x23 && c <= 0x27) ||
(c >= 0x30 && c <= 0x39) ||
(c >= 0x41 && c <= 0x5a) ||
(c >= 0x5e && c <= 0x7a) ||
c == 0x21 ||
c == 0x2a ||
c == 0x2b ||
c == 0x2d ||
c == 0x2e ||
c == 0x7c ||
c == 0x7e
}
func validateValue(s string) bool {
for _, c := range s {
if !validateValueChar(c) {
return false
}
}
return true
}
func validateValueChar(c int32) bool {
return c == 0x21 ||
(c >= 0x23 && c <= 0x2b) ||
(c >= 0x2d && c <= 0x3a) ||
(c >= 0x3c && c <= 0x5b) ||
(c >= 0x5d && c <= 0x7e)
}
// valueEscape escapes the string so it can be safely placed inside a baggage value,
// replacing special characters with %XX sequences as needed.
//
// The implementation is based on:
// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
func valueEscape(s string) string {
hexCount := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
hexCount++
}
}
if hexCount == 0 {
return s
}
var buf [64]byte
var t []byte
required := len(s) + 2*hexCount
if required <= len(buf) {
t = buf[:required]
} else {
t = make([]byte, required)
}
j := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(s[i]) {
const upperhex = "0123456789ABCDEF"
t[j] = '%'
t[j+1] = upperhex[c>>4]
t[j+2] = upperhex[c&15]
j += 3
} else {
t[j] = c
j++
}
}
return string(t)
}
// shouldEscape returns true if the specified byte should be escaped when
// appearing in a baggage value string.
func shouldEscape(c byte) bool {
if c == '%' {
// The percent character must be encoded so that percent-encoding can work.
return true
}
return !validateValueChar(int32(c))
}

View File

@ -22,7 +22,7 @@ transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
exporters are used to process and transport this data.
To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/.
To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/.
To read more about tracing, see go.opentelemetry.io/otel/trace.

View File

@ -18,7 +18,7 @@ import (
"context"
"encoding/hex"
"fmt"
"regexp"
"strings"
"go.opentelemetry.io/otel/trace"
)
@ -28,6 +28,7 @@ const (
maxVersion = 254
traceparentHeader = "traceparent"
tracestateHeader = "tracestate"
delimiter = "-"
)
// TraceContext is a propagator that supports the W3C Trace Context format
@ -41,8 +42,8 @@ const (
type TraceContext struct{}
var (
_ TextMapPropagator = TraceContext{}
traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
_ TextMapPropagator = TraceContext{}
versionPart = fmt.Sprintf("%.2X", supportedVersion)
)
// Inject set tracecontext from the Context into the carrier.
@ -59,12 +60,19 @@ func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
// Clear all flags other than the trace-context supported sampling bit.
flags := sc.TraceFlags() & trace.FlagsSampled
h := fmt.Sprintf("%.2x-%s-%s-%s",
supportedVersion,
sc.TraceID(),
sc.SpanID(),
flags)
carrier.Set(traceparentHeader, h)
var sb strings.Builder
sb.Grow(2 + 32 + 16 + 2 + 3)
_, _ = sb.WriteString(versionPart)
traceID := sc.TraceID()
spanID := sc.SpanID()
flagByte := [1]byte{byte(flags)}
var buf [32]byte
for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
_ = sb.WriteByte(delimiter[0])
n := hex.Encode(buf[:], src)
_, _ = sb.Write(buf[:n])
}
carrier.Set(traceparentHeader, sb.String())
}
// Extract reads tracecontext from the carrier into a returned Context.
@ -86,21 +94,8 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return trace.SpanContext{}
}
matches := traceCtxRegExp.FindStringSubmatch(h)
if len(matches) == 0 {
return trace.SpanContext{}
}
if len(matches) < 5 { // four subgroups plus the overall match
return trace.SpanContext{}
}
if len(matches[1]) != 2 {
return trace.SpanContext{}
}
ver, err := hex.DecodeString(matches[1])
if err != nil {
var ver [1]byte
if !extractPart(ver[:], &h, 2) {
return trace.SpanContext{}
}
version := int(ver[0])
@ -108,36 +103,24 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return trace.SpanContext{}
}
if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
return trace.SpanContext{}
}
if len(matches[2]) != 32 {
return trace.SpanContext{}
}
var scc trace.SpanContextConfig
scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
if err != nil {
if !extractPart(scc.TraceID[:], &h, 32) {
return trace.SpanContext{}
}
if !extractPart(scc.SpanID[:], &h, 16) {
return trace.SpanContext{}
}
if len(matches[3]) != 16 {
var opts [1]byte
if !extractPart(opts[:], &h, 2) {
return trace.SpanContext{}
}
scc.SpanID, err = trace.SpanIDFromHex(matches[3])
if err != nil {
if version == 0 && (h != "" || opts[0] > 2) {
// version 0 does not allow extra data
// version 0 does not allow other flags
return trace.SpanContext{}
}
if len(matches[4]) != 2 {
return trace.SpanContext{}
}
opts, err := hex.DecodeString(matches[4])
if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
return trace.SpanContext{}
}
// Clear all flags other than the trace-context supported sampling bit.
scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
@ -155,6 +138,29 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return sc
}
// upperHex reports whether v contains any upper-case hexadecimal characters.
func upperHex(v string) bool {
for _, c := range v {
if c >= 'A' && c <= 'F' {
return true
}
}
return false
}
func extractPart(dst []byte, h *string, n int) bool {
part, left, _ := strings.Cut(*h, delimiter)
*h = left
// hex.Decode accepts upper-case hex characters, which are not allowed here, so reject them explicitly.
if len(part) != n || upperHex(part) {
return false
}
if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
return false
}
return true
}
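An illustrative round trip through the regexp-free traceparent handling above (a consumer-side sketch using the W3C example IDs, not part of this file):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/propagation"
)

func main() {
	tc := propagation.TraceContext{}

	in := propagation.MapCarrier{
		"traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
	}
	ctx := tc.Extract(context.Background(), in)

	out := propagation.MapCarrier{}
	tc.Inject(ctx, out)
	fmt.Println(out["traceparent"]) // the same header, re-encoded by Inject
}
```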
// Fields returns the keys whose values are set with Inject.
func (tc TraceContext) Fields() []string {
return []string{traceparentHeader, tracestateHeader}

View File

@ -17,20 +17,14 @@ package trace // import "go.opentelemetry.io/otel/trace"
import (
"encoding/json"
"fmt"
"regexp"
"strings"
)
const (
maxListMembers = 32
listDelimiter = ","
// based on the W3C Trace Context specification, see
// https://www.w3.org/TR/trace-context-1/#tracestate-header
noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*`
withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*`
valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
listDelimiters = ","
memberDelimiter = "="
errInvalidKey errorConst = "invalid tracestate key"
errInvalidValue errorConst = "invalid tracestate value"
@ -39,43 +33,128 @@ const (
errDuplicate errorConst = "duplicate list-member in tracestate"
)
var (
noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`)
withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`)
valueRe = regexp.MustCompile(`^` + valueFormat + `$`)
memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
)
type member struct {
Key string
Value string
}
// checkValueChar reports whether v is a valid chr per the W3C Trace Context spec:
// chr = %x20 / nblk-chr, which reduces to %x20-2B / %x2D-3C / %x3E-7E.
func checkValueChar(v byte) bool {
return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
}
// checkValueLast reports whether v is a valid nblk-chr (%x21-2B / %x2D-3C / %x3E-7E).
func checkValueLast(v byte) bool {
return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
}
// based on the W3C Trace Context specification
//
// value = (0*255(chr)) nblk-chr
// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
// chr = %x20 / nblk-chr
//
// see https://www.w3.org/TR/trace-context-1/#value
func checkValue(val string) bool {
n := len(val)
if n == 0 || n > 256 {
return false
}
for i := 0; i < n-1; i++ {
if !checkValueChar(val[i]) {
return false
}
}
return checkValueLast(val[n-1])
}
func checkKeyRemain(key string) bool {
// ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
for _, v := range key {
if isAlphaNum(byte(v)) {
continue
}
switch v {
case '_', '-', '*', '/':
continue
}
return false
}
return true
}
// according to
//
// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
//
// n is the maximum length of the remaining part: 255 for a simple-key and 13 for a system-id.
func checkKeyPart(key string, n int) bool {
if len(key) == 0 {
return false
}
first := key[0] // key's first char
ret := len(key[1:]) <= n
ret = ret && first >= 'a' && first <= 'z'
return ret && checkKeyRemain(key[1:])
}
func isAlphaNum(c byte) bool {
if c >= 'a' && c <= 'z' {
return true
}
return c >= '0' && c <= '9'
}
// according to
//
// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
//
// n is the maximum length of the remaining part; it should be exactly 240.
func checkKeyTenant(key string, n int) bool {
if len(key) == 0 {
return false
}
return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
}
// based on the W3C Trace Context specification
//
// key = simple-key / multi-tenant-key
// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
// multi-tenant-key = tenant-id "@" system-id
// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
// lcalpha = %x61-7A ; a-z
//
// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
func checkKey(key string) bool {
tenant, system, ok := strings.Cut(key, "@")
if !ok {
return checkKeyPart(key, 255)
}
return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
}
func newMember(key, value string) (member, error) {
if len(key) > 256 {
return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
if !checkKey(key) {
return member{}, errInvalidKey
}
if !noTenantKeyRe.MatchString(key) {
if !withTenantKeyRe.MatchString(key) {
return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
}
atIndex := strings.LastIndex(key, "@")
if atIndex > 241 || len(key)-1-atIndex > 14 {
return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
}
}
if len(value) > 256 || !valueRe.MatchString(value) {
return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
if !checkValue(value) {
return member{}, errInvalidValue
}
return member{Key: key, Value: value}, nil
}
func parseMember(m string) (member, error) {
matches := memberRe.FindStringSubmatch(m)
if len(matches) != 3 {
key, val, ok := strings.Cut(m, memberDelimiter)
if !ok {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
result, e := newMember(matches[1], matches[2])
key = strings.TrimLeft(key, " \t")
val = strings.TrimRight(val, " \t")
result, e := newMember(key, val)
if e != nil {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
@ -85,7 +164,7 @@ func parseMember(m string) (member, error) {
// String encodes member into a string compliant with the W3C Trace Context
// specification.
func (m member) String() string {
return fmt.Sprintf("%s=%s", m.Key, m.Value)
return m.Key + "=" + m.Value
}
// TraceState provides additional vendor-specific trace identification
@ -109,8 +188,8 @@ var _ json.Marshaler = TraceState{}
// ParseTraceState attempts to decode a TraceState from the passed
// string. It returns an error if the input is invalid according to the W3C
// Trace Context specification.
func ParseTraceState(tracestate string) (TraceState, error) {
if tracestate == "" {
func ParseTraceState(ts string) (TraceState, error) {
if ts == "" {
return TraceState{}, nil
}
@ -120,7 +199,9 @@ func ParseTraceState(tracestate string) (TraceState, error) {
var members []member
found := make(map[string]struct{})
for _, memberStr := range strings.Split(tracestate, listDelimiter) {
for ts != "" {
var memberStr string
memberStr, ts, _ = strings.Cut(ts, listDelimiters)
if len(memberStr) == 0 {
continue
}
@ -153,11 +234,29 @@ func (ts TraceState) MarshalJSON() ([]byte, error) {
// Trace Context specification. The returned string will be invalid if the
// TraceState contains any invalid members.
func (ts TraceState) String() string {
members := make([]string, len(ts.list))
for i, m := range ts.list {
members[i] = m.String()
if len(ts.list) == 0 {
return ""
}
return strings.Join(members, listDelimiter)
var n int
n += len(ts.list) // member delimiters: '='
n += len(ts.list) - 1 // list delimiters: ','
for _, mem := range ts.list {
n += len(mem.Key)
n += len(mem.Value)
}
var sb strings.Builder
sb.Grow(n)
_, _ = sb.WriteString(ts.list[0].Key)
_ = sb.WriteByte('=')
_, _ = sb.WriteString(ts.list[0].Value)
for i := 1; i < len(ts.list); i++ {
_ = sb.WriteByte(listDelimiters[0])
_, _ = sb.WriteString(ts.list[i].Key)
_ = sb.WriteByte('=')
_, _ = sb.WriteString(ts.list[i].Value)
}
return sb.String()
}
// Get returns the value paired with key from the corresponding TraceState
@ -189,15 +288,25 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) {
if err != nil {
return ts, err
}
cTS := ts.Delete(key)
if cTS.Len()+1 <= maxListMembers {
cTS.list = append(cTS.list, member{})
n := len(ts.list)
found := n
for i := range ts.list {
if ts.list[i].Key == key {
found = i
}
}
cTS := TraceState{}
if found == n && n < maxListMembers {
cTS.list = make([]member, n+1)
} else {
cTS.list = make([]member, n)
}
// When the number of members exceeds capacity, drop the "right-most".
copy(cTS.list[1:], cTS.list)
cTS.list[0] = m
// When the number of members exceeds capacity, drop the "right-most".
copy(cTS.list[1:], ts.list[0:found])
if found < n {
copy(cTS.list[1+found:], ts.list[found+1:])
}
return cTS, nil
}
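An illustrative consumer-side sketch (not part of this file) of the rewritten parser and Insert; a newly inserted key becomes the left-most list member.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	ts, err := trace.ParseTraceState("rojo=00f067aa0ba902b7,congo=t61rcWkgMzE")
	if err != nil {
		panic(err)
	}
	ts, _ = ts.Insert("acme", "1")
	fmt.Println(ts.String()) // acme=1,rojo=00f067aa0ba902b7,congo=t61rcWkgMzE
}
```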

View File

@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
return "1.21.0"
return "1.22.0"
}

View File

@ -14,7 +14,7 @@
module-sets:
stable-v1:
version: v1.21.0
version: v1.22.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
@ -34,7 +34,7 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
version: v0.44.0
version: v0.45.0
modules:
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test

View File

@ -430,7 +430,7 @@ type ClientHeader struct {
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
// A single process may be used to run multiple virtual
// servers with different identities.
// The authority is the name of such a server identitiy.
// The authority is the name of such a server identity.
// It is typically a portion of the URI in the form of
// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`

View File

@ -1860,27 +1860,15 @@ func (cc *ClientConn) determineAuthority() error {
}
endpoint := cc.parsedTarget.Endpoint()
target := cc.target
switch {
case authorityFromDialOption != "":
if authorityFromDialOption != "" {
cc.authority = authorityFromDialOption
case authorityFromCreds != "":
} else if authorityFromCreds != "" {
cc.authority = authorityFromCreds
case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
// TODO: remove when the unix resolver implements optional interface to
// return channel authority.
cc.authority = "localhost"
case strings.HasPrefix(endpoint, ":"):
} else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
cc.authority = auth.OverrideAuthority(cc.parsedTarget)
} else if strings.HasPrefix(endpoint, ":") {
cc.authority = "localhost" + endpoint
default:
// TODO: Define an optional interface on the resolver builder to return
// the channel authority given the user's dial target. For resolvers
// which don't implement this interface, we will use the endpoint from
// "scheme://authority/endpoint" as the default authority.
// Escape the endpoint to handle use cases where the endpoint
// might not be a valid authority by default.
// For example an endpoint which has multiple paths like
// 'a/b/c', which is not a valid authority by default.
} else {
cc.authority = encodeAuthority(endpoint)
}
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)

View File

@ -57,7 +57,7 @@ var (
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
// stored in the passed in attributes. This is set by
// credentials/xds/xds.go.
GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo
GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
// GetServerCredentials returns the transport credentials configured on a
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
@ -68,11 +68,6 @@ var (
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
CanonicalString any // func (codes.Code) string
// DrainServerTransports initiates a graceful close of existing connections
// on a gRPC server accepted on the provided listener address. An
// xDS-enabled server invokes this method on a grpc.Server when a particular
// listener moves to "not-serving" mode.
DrainServerTransports any // func(*grpc.Server, string)
// IsRegisteredMethod returns whether the passed in method is registered as
// a method on the server.
IsRegisteredMethod any // func(*grpc.Server, string) bool
@ -188,6 +183,19 @@ var (
ExitIdleModeForTesting any // func(*grpc.ClientConn) error
ChannelzTurnOffForTesting func()
// TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found
// error for a given resource type and name. This is usually triggered when
// the associated watch timer fires. For testing purposes, having this
// function makes events more predictable than relying on timer events.
TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error
// TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client singleton
// to trigger a resource-not-found error for the given resource type name and resource name.
TriggerXDSResourceNameNotFoundClient any // func(string, string) error
// FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD.
FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.

View File

@ -61,6 +61,10 @@ func (b *builder) Scheme() string {
return b.scheme
}
func (b *builder) OverrideAuthority(resolver.Target) string {
return "localhost"
}
type nopResolver struct {
}

View File

@ -1,4 +1,4 @@
//go:build !unix
//go:build !unix && !windows
/*
* Copyright 2023 gRPC authors.

View File

@ -0,0 +1,54 @@
//go:build windows
/*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package internal
import (
"net"
"syscall"
"time"
"golang.org/x/sys/windows"
)
// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
// the underlying connection with OS default values for keepalive parameters.
//
// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
// appropriate Go version becomes less than our least supported Go version, we
// should look into using the new API to make things more straightforward.
func NetDialerWithTCPKeepalive() *net.Dialer {
return &net.Dialer{
// Setting a negative value here prevents the Go stdlib from overriding
// the values of TCP keepalive time and interval. It also prevents the
// Go stdlib from enabling TCP keepalives by default.
KeepAlive: time.Duration(-1),
// This method is called after the underlying network socket is created,
// but before dialing the socket (or calling its connect() method). The
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
// the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
})
},
}
}

View File

@ -59,6 +59,8 @@ import (
// atomically.
var clientConnectionCounter uint64
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
@ -568,7 +570,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
}
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
var k string
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
@ -1323,10 +1325,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server.
if streamID > id && streamID <= upperLimit {
atomic.StoreUint32(&stream.unprocessed, 1)
streamsToClose = append(streamsToClose, stream)
}
atomic.StoreUint32(&stream.unprocessed, 1)
streamsToClose = append(streamsToClose, stream)
}
}
t.mu.Unlock()

View File

@ -960,7 +960,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
}
}
if err := t.writeHeaderLocked(s); err != nil {
return status.Convert(err).Err()
switch e := err.(type) {
case ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
return status.Convert(err).Err()
}
}
return nil
}

View File

@ -25,8 +25,14 @@ import (
"context"
"fmt"
"strings"
"google.golang.org/grpc/internal"
)
func init() {
internal.FromOutgoingContextRaw = fromOutgoingContextRaw
}
// DecodeKeyValue returns k, v, nil.
//
// Deprecated: use k and v directly instead.
@ -238,16 +244,13 @@ func copyOf(v []string) []string {
return vals
}
// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
//
// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
// is a map, there's no guarantee it's created using our helper functions) and
// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
// lowercase).
//
// This is intended for gRPC-internal use ONLY. Users should use
// FromOutgoingContext instead.
func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
if !ok {
return nil, nil, false

View File

@ -314,3 +314,13 @@ type Resolver interface {
// Close closes the resolver.
Close()
}
// AuthorityOverrider is implemented by Builders that wish to override the
// default authority for the ClientConn.
// By default, the authority used is target.Endpoint().
type AuthorityOverrider interface {
// OverrideAuthority returns the authority to use for a ClientConn with the
// given target. The implementation must generate it without blocking,
// typically in line, and must keep it unchanged.
OverrideAuthority(Target) string
}
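A sketch of a Builder that also overrides the authority (hypothetical exampleBuilder and nopResolver types, mirroring the passthrough test change earlier in this diff):

```go
package main

import "google.golang.org/grpc/resolver"

// exampleBuilder is a hypothetical resolver.Builder that pins the authority.
type exampleBuilder struct{}

func (exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	return nopResolver{}, nil
}

func (exampleBuilder) Scheme() string { return "example" }

// OverrideAuthority implements resolver.AuthorityOverrider.
func (exampleBuilder) OverrideAuthority(resolver.Target) string { return "localhost" }

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func main() {
	resolver.Register(exampleBuilder{})
}
```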

View File

@ -640,14 +640,18 @@ func encode(c baseCodec, msg any) ([]byte, error) {
return b, nil
}
// compress returns the input bytes compressed by compressor or cp. If both
// compressors are nil, returns nil.
// compress returns the input bytes compressed by compressor or cp.
// If both compressors are nil, or if the message has zero length, returns nil,
// indicating no compression was done.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
if compressor == nil && cp == nil {
return nil, nil
}
if len(in) == 0 {
return nil, nil
}
wrapErr := func(err error) error {
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}

View File

@ -74,9 +74,6 @@ func init() {
return srv.isRegisteredMethod(method)
}
internal.ServerFromContext = serverFromContext
internal.DrainServerTransports = func(srv *Server, addr string) {
srv.drainServerTransports(addr)
}
internal.AddGlobalServerOptions = func(opt ...ServerOption) {
globalServerOptions = append(globalServerOptions, opt...)
}
@ -139,7 +136,8 @@ type Server struct {
quit *grpcsync.Event
done *grpcsync.Event
channelzRemoveOnce sync.Once
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
handlersWG sync.WaitGroup // counts active method handler goroutines
channelzID *channelz.Identifier
czData *channelzData
@ -176,6 +174,7 @@ type serverOptions struct {
headerTableSize *uint32
numServerWorkers uint32
recvBufferPool SharedBufferPool
waitForHandlers bool
}
var defaultServerOptions = serverOptions{
@ -573,6 +572,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
})
}
// WaitForHandlers causes Stop to wait until all outstanding method handlers have
// exited before returning. If false, Stop will return as soon as all
// connections have closed, but method handlers may still be running. By
// default, Stop does not wait for method handlers to return.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WaitForHandlers(w bool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.waitForHandlers = w
})
}
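Illustrative usage of the new option (service registration elided): with WaitForHandlers enabled, Stop blocks until in-flight method handlers return rather than only until connections are closed.

```go
package main

import (
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}

	s := grpc.NewServer(grpc.WaitForHandlers(true))
	go s.Serve(lis)

	// ... register services and serve traffic ...

	// Stop now also waits for running method handlers to exit.
	s.Stop()
}
```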
// RecvBufferPool returns a ServerOption that configures the server
// to use the provided shared buffer pool for parsing incoming messages. Depending
// on the application's workload, this could result in reduced memory allocation.
@ -932,6 +946,12 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
return
}
if cc, ok := rawConn.(interface {
PassServerTransport(transport.ServerTransport)
}); ok {
cc.PassServerTransport(st)
}
if !s.addConn(lisAddr, st) {
return
}
@ -941,15 +961,6 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
}()
}
func (s *Server) drainServerTransports(addr string) {
s.mu.Lock()
conns := s.conns[addr]
for st := range conns {
st.Drain("")
}
s.mu.Unlock()
}
// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
@ -1010,9 +1021,11 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport,
streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
st.HandleStreams(ctx, func(stream *transport.Stream) {
s.handlersWG.Add(1)
streamQuota.acquire()
f := func() {
defer streamQuota.release()
defer s.handlersWG.Done()
s.handleStream(st, stream)
}
@ -1911,6 +1924,10 @@ func (s *Server) stop(graceful bool) {
s.serverWorkerChannelClose()
}
if graceful || s.opts.waitForHandlers {
s.handlersWG.Wait()
}
if s.events != nil {
s.events.Finish()
s.events = nil

View File

@ -48,6 +48,8 @@ import (
"google.golang.org/grpc/status"
)
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
// StreamHandler defines the handler called by gRPC server to complete the
// execution of a streaming RPC.
//
@ -184,7 +186,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// when the RPC completes.
opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
// validate md
if err := imetadata.Validate(md); err != nil {
return nil, status.Error(codes.Internal, err.Error())

View File

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.60.1"
const Version = "1.61.0"

View File

@ -88,7 +88,7 @@ not git grep -l 'x/net/context' -- "*.go"
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test'
# - Do not use "interface{}"; use "any" instead.
git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc'
git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate'
# - Do not call grpclog directly. Use grpclog.Component instead.
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
@ -127,7 +127,7 @@ staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true
grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)"
# Exclude underscore checks for generated code.
grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)'
grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)'
# Error for duplicate imports not including grpc protos.
grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
@ -152,6 +152,7 @@ grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
XXXXX Protobuf related deprecation errors:
"github.com/golang/protobuf
.pb.go:
grpc_testing_not_regenerate
: ptypes.
proto.RegisterType
XXXXX gRPC internal usage deprecation errors:
@ -184,9 +185,6 @@ GetSafeRegexMatch
GetSuffixMatch
GetTlsCertificateCertificateProviderInstance
GetValidationContextCertificateProviderInstance
XXXXX TODO: Remove the below deprecation usages:
CloseNotifier
Roots.Subjects
XXXXX PleaseIgnoreUnused'
echo SUCCESS

14
vendor/modules.txt vendored
View File

@ -552,7 +552,7 @@ github.com/gin-gonic/gin/render
github.com/go-jose/go-jose/v3
github.com/go-jose/go-jose/v3/cipher
github.com/go-jose/go-jose/v3/json
# github.com/go-logr/logr v1.3.0
# github.com/go-logr/logr v1.4.1
## explicit; go 1.18
github.com/go-logr/logr
github.com/go-logr/logr/funcr
@ -738,8 +738,6 @@ github.com/klauspost/pgzip
# github.com/kr/fs v0.1.0
## explicit
github.com/kr/fs
# github.com/kr/pretty v0.3.1
## explicit; go 1.12
# github.com/leodido/go-urn v1.2.4
## explicit; go 1.16
github.com/leodido/go-urn
@ -909,7 +907,7 @@ github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalk
github.com/opencontainers/selinux/pkg/pwalkdir
# github.com/openshift/imagebuilder v1.2.6-0.20231127234745-ef2a5fe47510
# github.com/openshift/imagebuilder v1.2.6
## explicit; go 1.19
github.com/openshift/imagebuilder
github.com/openshift/imagebuilder/dockerfile/command
@ -1123,7 +1121,7 @@ go.opencensus.io/trace/tracestate
## explicit; go 1.19
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
# go.opentelemetry.io/otel v1.21.0
# go.opentelemetry.io/otel v1.22.0
## explicit; go 1.20
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@ -1137,13 +1135,13 @@ go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/internal
go.opentelemetry.io/otel/semconv/v1.12.0
go.opentelemetry.io/otel/semconv/v1.17.0
# go.opentelemetry.io/otel/metric v1.21.0
# go.opentelemetry.io/otel/metric v1.22.0
## explicit; go 1.20
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
# go.opentelemetry.io/otel/sdk v1.21.0
## explicit; go 1.20
# go.opentelemetry.io/otel/trace v1.21.0
# go.opentelemetry.io/otel/trace v1.22.0
## explicit; go 1.20
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
@ -1283,7 +1281,7 @@ google.golang.org/appengine/urlfetch
# google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.60.1
# google.golang.org/grpc v1.61.0
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes