Mirror of https://github.com/superseriousbusiness/gotosocial
feat: initial tracing support (#1623)
parent 878ed48de3 · commit 6392e00653
472 changed files with 102600 additions and 12 deletions
@@ -32,6 +32,7 @@ import (
    apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
    "github.com/superseriousbusiness/gotosocial/internal/gtserror"
    "github.com/superseriousbusiness/gotosocial/internal/middleware"
    "github.com/superseriousbusiness/gotosocial/internal/tracing"
    "go.uber.org/automaxprocs/maxprocs"

    "github.com/superseriousbusiness/gotosocial/internal/config"

@@ -70,6 +71,12 @@ var Start action.GTSAction = func(ctx context.Context) error {
    state.Caches.Start()
    defer state.Caches.Stop()

    // Initialize Tracing
    if err := tracing.Initialize(); err != nil {
        return fmt.Errorf("error initializing tracing: %w", err)
    }

    // Open connection to the database
    dbService, err := bundb.NewBunDBService(ctx, &state)
    if err != nil {

@@ -146,16 +153,23 @@ var Start action.GTSAction = func(ctx context.Context) error {
        return fmt.Errorf("error creating router: %s", err)
    }

    // attach global middlewares which are used for every request
    router.AttachGlobalMiddleware(
        middleware.AddRequestID(config.GetRequestIDHeader()),
    middlewares := []gin.HandlerFunc{
        middleware.AddRequestID(config.GetRequestIDHeader()), // requestID middleware must run before tracing
    }
    if config.GetTracingEnabled() {
        middlewares = append(middlewares, tracing.InstrumentGin())
    }
    middlewares = append(middlewares, []gin.HandlerFunc{
        // note: hooks adding ctx fields must be ABOVE
        // the logger, otherwise won't be accessible.
        middleware.Logger(),
        middleware.UserAgent(),
        middleware.CORS(),
        middleware.ExtraHeaders(),
    )
    }...)

    // attach global middlewares which are used for every request
    router.AttachGlobalMiddleware(middlewares...)

    // attach global no route / 404 handler to the router
    router.AttachNoRouteHandler(func(c *gin.Context) {
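Taken together, the hunks above boil down to the following startup wiring. This is a minimal, consolidated sketch rather than the verbatim file: it assumes it runs inside the GoToSocial module (the internal/... packages cannot be imported from outside it), and it stands a plain gin engine in for the project's own router type.

```go
package main

import (
	"log"

	"github.com/gin-gonic/gin"
	"github.com/superseriousbusiness/gotosocial/internal/config"
	"github.com/superseriousbusiness/gotosocial/internal/middleware"
	"github.com/superseriousbusiness/gotosocial/internal/tracing"
)

func main() {
	// Set up tracing before anything that might emit spans; this is a no-op
	// when tracing-enabled is false.
	if err := tracing.Initialize(); err != nil {
		log.Fatalf("error initializing tracing: %v", err)
	}

	// The request ID middleware must run before tracing so spans can pick the ID up.
	middlewares := []gin.HandlerFunc{
		middleware.AddRequestID(config.GetRequestIDHeader()),
	}
	if config.GetTracingEnabled() {
		middlewares = append(middlewares, tracing.InstrumentGin())
	}
	middlewares = append(middlewares,
		middleware.Logger(),
		middleware.UserAgent(),
		middleware.CORS(),
		middleware.ExtraHeaders(),
	)

	// Stand-in for router.AttachGlobalMiddleware(middlewares...) in the real code.
	engine := gin.New()
	engine.Use(middlewares...)
	log.Printf("attached %d global middlewares", len(middlewares))
}
```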
@@ -40,6 +40,7 @@ import (
    "github.com/superseriousbusiness/gotosocial/internal/oidc"
    "github.com/superseriousbusiness/gotosocial/internal/state"
    "github.com/superseriousbusiness/gotosocial/internal/storage"
    "github.com/superseriousbusiness/gotosocial/internal/tracing"
    "github.com/superseriousbusiness/gotosocial/internal/web"
    "github.com/superseriousbusiness/gotosocial/testrig"
)

@@ -51,6 +52,10 @@ var Start action.GTSAction = func(ctx context.Context) error {
    testrig.InitTestConfig()
    testrig.InitTestLog()

    if err := tracing.Initialize(); err != nil {
        return fmt.Errorf("error initializing tracing: %w", err)
    }

    // Initialize caches
    state.Caches.Init()
    state.Caches.Start()

@@ -93,14 +98,21 @@ var Start action.GTSAction = func(ctx context.Context) error {
    */

    router := testrig.NewTestRouter(state.DB)

    // attach global middlewares which are used for every request
    router.AttachGlobalMiddleware(
    middlewares := []gin.HandlerFunc{
        middleware.AddRequestID(config.GetRequestIDHeader()), // requestID middleware must run before tracing
    }
    if config.GetTracingEnabled() {
        middlewares = append(middlewares, tracing.InstrumentGin())
    }
    middlewares = append(middlewares, []gin.HandlerFunc{
        middleware.Logger(),
        middleware.UserAgent(),
        middleware.CORS(),
        middleware.ExtraHeaders(),
    )
    }...)

    // attach global middlewares which are used for every request
    router.AttachGlobalMiddleware(middlewares...)

    // attach global no route / 404 handler to the router
    router.AttachNoRouteHandler(func(c *gin.Context) {
@@ -9,8 +9,34 @@ These settings let you tune and configure certain observability related behaviou

##### OBSERVABILITY SETTINGS #####
##################################

# Bool. Enable generation/parsing of a request ID for each received HTTP Request.
# Default: true
request-id-enabled: true

# String. Header name to use to extract a request or trace ID from. Typically set by a
# loadbalancer or proxy.
# Default: "X-Request-Id"
request-id-header: "X-Request-Id"

# Bool. Enable OpenTelemetry based tracing support.
# Default: false
tracing-enabled: false

# String. Set the transport protocol for the tracing system. Can either be "grpc" for
# OTLP gRPC or "jaeger" for jaeger based ingesters.
# Options: ["grpc", "jaeger"]
# Default: "grpc"
tracing-transport: "grpc"

# String. Endpoint of the trace ingester. When using the gRPC based transport, the
# endpoint is usually a single address/port combination. For the jaeger transport it
# should be a fully qualified URL.
# Examples: ["localhost:4317", "http://localhost:14268/api/traces"]
# Default: ""
tracing-endpoint: ""

# Bool. Disable HTTPS for the gRPC transport protocol.
# Default: false
tracing-insecure-transport: false
```
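For reference, the same knobs are exposed programmatically through the config helpers added later in this commit. The sketch below mirrors the YAML above; it assumes an OTLP collector listening on localhost:4317 without TLS, and that it is compiled inside the GoToSocial module.

```go
package main

import (
	"log"

	"github.com/superseriousbusiness/gotosocial/internal/config"
	"github.com/superseriousbusiness/gotosocial/internal/tracing"
)

func main() {
	// Programmatic equivalent of the YAML settings above.
	config.SetTracingEnabled(true)
	config.SetTracingTransport("grpc")          // or "jaeger"
	config.SetTracingEndpoint("localhost:4317") // OTLP gRPC collector (assumed local)
	config.SetTracingInsecureTransport(true)    // plain-text gRPC for local testing

	if err := tracing.Initialize(); err != nil {
		log.Fatalf("error initializing tracing: %v", err)
	}
	log.Println("tracing initialized")
}
```

Environment variables follow the same names with a GTS_ prefix; the test script later in this diff sets GTS_TRACING_ENDPOINT, for example.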
go.mod (18 changed lines)

@@ -49,8 +49,15 @@ require (
    github.com/uptrace/bun v1.1.12
    github.com/uptrace/bun/dialect/pgdialect v1.1.12
    github.com/uptrace/bun/dialect/sqlitedialect v1.1.12
    github.com/uptrace/bun/extra/bunotel v1.1.12
    github.com/wagslane/go-password-validator v0.3.0
    github.com/yuin/goldmark v1.5.4
    go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.40.0
    go.opentelemetry.io/otel v1.14.0
    go.opentelemetry.io/otel/exporters/jaeger v1.14.0
    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0
    go.opentelemetry.io/otel/sdk v1.14.0
    go.opentelemetry.io/otel/trace v1.14.0
    go.uber.org/automaxprocs v1.5.2
    golang.org/x/crypto v0.8.0
    golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d

@@ -77,6 +84,7 @@ require (
    codeberg.org/gruf/go-pools v1.1.0 // indirect
    github.com/aymerick/douceur v0.2.0 // indirect
    github.com/bytedance/sonic v1.8.0 // indirect
    github.com/cenkalti/backoff/v4 v4.2.0 // indirect
    github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
    github.com/cilium/ebpf v0.9.1 // indirect
    github.com/containerd/cgroups/v3 v3.0.1 // indirect

@@ -95,6 +103,8 @@ require (
    github.com/gin-contrib/sse v0.1.0 // indirect
    github.com/go-errors/errors v1.4.1 // indirect
    github.com/go-jose/go-jose/v3 v3.0.0 // indirect
    github.com/go-logr/logr v1.2.3 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-playground/locales v0.14.1 // indirect
    github.com/go-playground/universal-translator v0.18.1 // indirect
    github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850 // indirect

@@ -107,6 +117,7 @@ require (
    github.com/gorilla/css v1.0.0 // indirect
    github.com/gorilla/securecookie v1.1.1 // indirect
    github.com/gorilla/sessions v1.2.1 // indirect
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
    github.com/hashicorp/hcl v1.0.0 // indirect
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/jackc/chunkreader/v2 v2.0.1 // indirect

@@ -145,13 +156,20 @@ require (
    github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
    github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
    github.com/ugorji/go/codec v1.2.9 // indirect
    github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.21 // indirect
    github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
    github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect
    go.opentelemetry.io/otel/metric v0.36.0 // indirect
    go.opentelemetry.io/proto/otlp v0.19.0 // indirect
    golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect
    golang.org/x/mod v0.10.0 // indirect
    golang.org/x/sys v0.7.0 // indirect
    golang.org/x/tools v0.6.0 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
    google.golang.org/grpc v1.53.0 // indirect
    google.golang.org/protobuf v1.28.1 // indirect
    gopkg.in/ini.v1 v1.67.0 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
go.sum (73 changed lines)

@@ -92,12 +92,14 @@ github.com/KimMachineGun/automemlimit v0.2.6 h1:tQFriVTcIteUkV5EgU9iz03eDY36T8JU
github.com/KimMachineGun/automemlimit v0.2.6/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/abema/go-mp4 v0.10.1 h1:wOhZgNxjduc8r4FJdwPa5x/gdBSSX+8MTnfNj/xkJaE=
github.com/abema/go-mp4 v0.10.1/go.mod h1:vPl9t5ZK7K0x68jh12/+ECWBCXoWuIDtNgPtU2f04ws=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=

@@ -105,7 +107,11 @@ github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3b
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA=
github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=

@@ -119,6 +125,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 h1:ox2F0PSMlrAAiAdknSRMDrAr8mfxPCfSZolH+/qQnyQ=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=

@@ -168,6 +179,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=

@@ -180,6 +193,7 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS
github.com/fxamacker/cbor v1.5.1 h1:XjQWBgdmQyqimslUh5r4tUGmoqzHmBFQOImkWGi2awg=
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=

@@ -205,6 +219,11 @@ github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyM
github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/form/v4 v4.2.0 h1:N1wh+Goz61e6w66vo8vJkQt+uwZSoLz50kZPJWR8eic=

@@ -238,6 +257,8 @@ github.com/golang/geo v0.0.0-20200319012246-673a6f80352d/go.mod h1:QZ0nwyI2jOfgR
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=

@@ -278,6 +299,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=

@@ -320,6 +342,9 @@ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=

@@ -485,6 +510,7 @@ github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTep
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=

@@ -510,6 +536,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=

@@ -526,6 +553,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=

@@ -589,6 +617,10 @@ github.com/uptrace/bun/dialect/pgdialect v1.1.12 h1:m/CM1UfOkoBTglGO5CUTKnIKKOAp
github.com/uptrace/bun/dialect/pgdialect v1.1.12/go.mod h1:Ij6WIxQILxLlL2frUBxUBOZJtLElD2QQNDcu/PWDHTc=
github.com/uptrace/bun/dialect/sqlitedialect v1.1.12 h1:Ud31nqZmebcQpl151nb108+vtcpxJ7kfXmbPYbALBiI=
github.com/uptrace/bun/dialect/sqlitedialect v1.1.12/go.mod h1:Pwg7s31BdF3PMBlWTnYkEn2I9ASsvatt1Ln/AERCTV4=
github.com/uptrace/bun/extra/bunotel v1.1.12 h1:uWPU75j9dYGXMRC9jF0ASlndZZAcngoqZagH4w3kn54=
github.com/uptrace/bun/extra/bunotel v1.1.12/go.mod h1:QfszJGLzNaTTGvvg17cEEUyEwxXq2NJ7sRvrPYvYSIU=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.21 h1:iHkIlTU2P3xbSbVJbAiHL9IT+ekYV5empheF+652yeQ=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.21/go.mod h1:hiCFa1UeZITKXi8lhu2qwOD5LHXjdGMCUIQHbybxoF0=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.14.0/go.mod h1:ol1PCaL0dX20wC0htZ7sYCsvCYmrouYra0zHzaclZhE=

@@ -631,13 +663,35 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.40.0 h1:E4MMXDxufRnIHXhoTNOlNsdkWpC5HdLhfj84WNRKPkc=
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.40.0/go.mod h1:A8+gHkpqTfMKxdKWq1pp360nAs096K26CH5Sm2YHDdA=
go.opentelemetry.io/contrib/propagators/b3 v1.15.0 h1:bMaonPyFcAvZ4EVzkUNkfnUHP5Zi63CIDlA3dRsEg8Q=
go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
go.opentelemetry.io/otel/exporters/jaeger v1.14.0 h1:CjbUNd4iN2hHmWekmOqZ+zSCU+dzZppG8XsV+A3oc8Q=
go.opentelemetry.io/otel/exporters/jaeger v1.14.0/go.mod h1:4Ay9kk5vELRrbg5z4cpP9EtmQRFap2Wb0woPG4lujZA=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
go.opentelemetry.io/otel/metric v0.36.0 h1:t0lgGI+L68QWt3QtOIlqM9gXoxqxWLhZ3R/e5oOAY0Q=
go.opentelemetry.io/otel/metric v0.36.0/go.mod h1:wKVw57sd2HdSZAzyfOM9gTqqE8v7CbqWsYL6AyrH9qk=
go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=

@@ -741,6 +795,7 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=

@@ -757,6 +812,7 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=

@@ -818,7 +874,9 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

@@ -845,6 +903,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=

@@ -966,6 +1025,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=

@@ -979,6 +1039,9 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=

@@ -992,9 +1055,15 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

@@ -1007,6 +1076,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=

@@ -1026,6 +1096,7 @@ gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -126,6 +126,11 @@ type Configuration struct {
    OIDCLinkExisting bool `name:"oidc-link-existing" usage:"link existing user accounts to OIDC logins based on the stored email value"`
    OIDCAdminGroups []string `name:"oidc-admin-groups" usage:"Membership of one of the listed groups makes someone a GtS admin"`

    TracingEnabled bool `name:"tracing-enabled" usage:"Enable OTLP Tracing"`
    TracingTransport string `name:"tracing-transport" usage:"grpc or jaeger"`
    TracingEndpoint string `name:"tracing-endpoint" usage:"Endpoint of your trace collector. Eg., 'localhost:4317' for gRPC, 'http://localhost:14268/api/traces' for jaeger"`
    TracingInsecureTransport bool `name:"tracing-insecure" usage:"Disable HTTPS for the gRPC transport protocol"`

    SMTPHost string `name:"smtp-host" usage:"Host of the smtp server. Eg., 'smtp.eu.mailgun.org'"`
    SMTPPort int `name:"smtp-port" usage:"Port of the smtp server. Eg., 587"`
    SMTPUsername string `name:"smtp-username" usage:"Username to authenticate with the smtp server as. Eg., 'postmaster@mail.example.org'"`

@@ -153,7 +158,7 @@ type Configuration struct {
    AdminTransPath string `name:"path" usage:"the path of the file to import from/export to"`
    AdminMediaPruneDryRun bool `name:"dry-run" usage:"perform a dry run and only log number of items eligible for pruning"`

    RequestIDHeader string `name:"request-id-header" usage:"Header to extract the Request ID from. Eg.,'X-Request-Id'"`
    RequestIDHeader string `name:"request-id-header" usage:"Header to extract the Request ID from. Eg.,'X-Request-Id'."`
}

type CacheConfiguration struct {
@@ -109,6 +109,11 @@ var Defaults = Configuration{
    SMTPFrom: "GoToSocial",
    SMTPDiscloseRecipients: false,

    TracingEnabled: false,
    TracingTransport: "grpc",
    TracingEndpoint: "",
    TracingInsecureTransport: false,

    SyslogEnabled: false,
    SyslogProtocol: "udp",
    SyslogAddress: "localhost:514",
@@ -1799,6 +1799,106 @@ func GetOIDCAdminGroups() []string { return global.GetOIDCAdminGroups() }
// SetOIDCAdminGroups safely sets the value for global configuration 'OIDCAdminGroups' field
func SetOIDCAdminGroups(v []string) { global.SetOIDCAdminGroups(v) }

// GetTracingEnabled safely fetches the Configuration value for state's 'TracingEnabled' field
func (st *ConfigState) GetTracingEnabled() (v bool) {
    st.mutex.Lock()
    v = st.config.TracingEnabled
    st.mutex.Unlock()
    return
}

// SetTracingEnabled safely sets the Configuration value for state's 'TracingEnabled' field
func (st *ConfigState) SetTracingEnabled(v bool) {
    st.mutex.Lock()
    defer st.mutex.Unlock()
    st.config.TracingEnabled = v
    st.reloadToViper()
}

// TracingEnabledFlag returns the flag name for the 'TracingEnabled' field
func TracingEnabledFlag() string { return "tracing-enabled" }

// GetTracingEnabled safely fetches the value for global configuration 'TracingEnabled' field
func GetTracingEnabled() bool { return global.GetTracingEnabled() }

// SetTracingEnabled safely sets the value for global configuration 'TracingEnabled' field
func SetTracingEnabled(v bool) { global.SetTracingEnabled(v) }

// GetTracingTransport safely fetches the Configuration value for state's 'TracingTransport' field
func (st *ConfigState) GetTracingTransport() (v string) {
    st.mutex.Lock()
    v = st.config.TracingTransport
    st.mutex.Unlock()
    return
}

// SetTracingTransport safely sets the Configuration value for state's 'TracingTransport' field
func (st *ConfigState) SetTracingTransport(v string) {
    st.mutex.Lock()
    defer st.mutex.Unlock()
    st.config.TracingTransport = v
    st.reloadToViper()
}

// TracingTransportFlag returns the flag name for the 'TracingTransport' field
func TracingTransportFlag() string { return "tracing-transport" }

// GetTracingTransport safely fetches the value for global configuration 'TracingTransport' field
func GetTracingTransport() string { return global.GetTracingTransport() }

// SetTracingTransport safely sets the value for global configuration 'TracingTransport' field
func SetTracingTransport(v string) { global.SetTracingTransport(v) }

// GetTracingEndpoint safely fetches the Configuration value for state's 'TracingEndpoint' field
func (st *ConfigState) GetTracingEndpoint() (v string) {
    st.mutex.Lock()
    v = st.config.TracingEndpoint
    st.mutex.Unlock()
    return
}

// SetTracingEndpoint safely sets the Configuration value for state's 'TracingEndpoint' field
func (st *ConfigState) SetTracingEndpoint(v string) {
    st.mutex.Lock()
    defer st.mutex.Unlock()
    st.config.TracingEndpoint = v
    st.reloadToViper()
}

// TracingEndpointFlag returns the flag name for the 'TracingEndpoint' field
func TracingEndpointFlag() string { return "tracing-endpoint" }

// GetTracingEndpoint safely fetches the value for global configuration 'TracingEndpoint' field
func GetTracingEndpoint() string { return global.GetTracingEndpoint() }

// SetTracingEndpoint safely sets the value for global configuration 'TracingEndpoint' field
func SetTracingEndpoint(v string) { global.SetTracingEndpoint(v) }

// GetTracingInsecureTransport safely fetches the Configuration value for state's 'TracingInsecureTransport' field
func (st *ConfigState) GetTracingInsecureTransport() (v bool) {
    st.mutex.Lock()
    v = st.config.TracingInsecureTransport
    st.mutex.Unlock()
    return
}

// SetTracingInsecureTransport safely sets the Configuration value for state's 'TracingInsecureTransport' field
func (st *ConfigState) SetTracingInsecureTransport(v bool) {
    st.mutex.Lock()
    defer st.mutex.Unlock()
    st.config.TracingInsecureTransport = v
    st.reloadToViper()
}

// TracingInsecureTransportFlag returns the flag name for the 'TracingInsecureTransport' field
func TracingInsecureTransportFlag() string { return "tracing-insecure" }

// GetTracingInsecureTransport safely fetches the value for global configuration 'TracingInsecureTransport' field
func GetTracingInsecureTransport() bool { return global.GetTracingInsecureTransport() }

// SetTracingInsecureTransport safely sets the value for global configuration 'TracingInsecureTransport' field
func SetTracingInsecureTransport(v bool) { global.SetTracingInsecureTransport(v) }

// GetSMTPHost safely fetches the Configuration value for state's 'SMTPHost' field
func (st *ConfigState) GetSMTPHost() (v string) {
    st.mutex.Lock()
@@ -42,6 +42,7 @@ import (
    "github.com/superseriousbusiness/gotosocial/internal/id"
    "github.com/superseriousbusiness/gotosocial/internal/log"
    "github.com/superseriousbusiness/gotosocial/internal/state"
    "github.com/superseriousbusiness/gotosocial/internal/tracing"
    "github.com/uptrace/bun"
    "github.com/uptrace/bun/dialect/pgdialect"
    "github.com/uptrace/bun/dialect/sqlitedialect"

@@ -134,6 +135,10 @@ func NewBunDBService(ctx context.Context, state *state.State) (db.DB, error) {
    // Add database query hook
    conn.DB.AddQueryHook(queryHook{})

    if config.GetTracingEnabled() {
        conn.DB.AddQueryHook(tracing.InstrumentBun())
    }

    // execute sqlite pragmas *after* adding database hook;
    // this allows the pragma queries to be logged
    if t == "sqlite" {
@@ -65,9 +65,8 @@ func generateID() string {
// AddRequestID returns a gin middleware which adds a unique ID to each request (both response header and context).
func AddRequestID(header string) gin.HandlerFunc {
    return func(c *gin.Context) {
        // Look for existing ID.
        id := c.GetHeader(header)

        // Have we found anything?
        if id == "" {
            // Generate new ID.
            //
internal/tracing/no_trace.go (new file, 43 lines)

@@ -0,0 +1,43 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

//go:build notracing

package tracing

import (
    "errors"

    "github.com/gin-gonic/gin"
    "github.com/superseriousbusiness/gotosocial/internal/config"
    "github.com/uptrace/bun"
)

func Initialize() error {
    if config.GetTracingEnabled() {
        return errors.New("tracing was disabled at build time")
    }
    return nil
}

func InstrumentGin() gin.HandlerFunc {
    return func(c *gin.Context) {}
}

func InstrumentBun() bun.QueryHook {
    return nil
}
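Because of the //go:build notracing constraint, a binary compiled with that build tag (for example via go build -tags notracing, standard Go build-tag behaviour) gets these no-op stand-ins instead of the OpenTelemetry implementation below, and Initialize returns an error if tracing-enabled is still set in config.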
internal/tracing/tracing.go (new file, 175 lines)

@@ -0,0 +1,175 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

//go:build !notracing

package tracing

import (
    "context"
    "fmt"

    "codeberg.org/gruf/go-kv"
    "github.com/gin-gonic/gin"
    "github.com/uptrace/bun"
    "github.com/uptrace/bun/extra/bunotel"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/exporters/jaeger"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
    "go.opentelemetry.io/otel/propagation"
    "go.opentelemetry.io/otel/sdk/resource"
    "go.opentelemetry.io/otel/sdk/trace"
    semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
    "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
    oteltrace "go.opentelemetry.io/otel/trace"

    "github.com/superseriousbusiness/gotosocial/internal/config"
    "github.com/superseriousbusiness/gotosocial/internal/gtscontext"
    "github.com/superseriousbusiness/gotosocial/internal/log"
)

const (
    tracerKey = "gotosocial-server-tracer"
    tracerName = "github.com/superseriousbusiness/gotosocial/internal/tracing"
)

func Initialize() error {
    if !config.GetTracingEnabled() {
        return nil
    }

    insecure := config.GetTracingInsecureTransport()

    var tpo trace.TracerProviderOption
    switch config.GetTracingTransport() {
    case "grpc":
        opts := []otlptracegrpc.Option{
            otlptracegrpc.WithEndpoint(config.GetTracingEndpoint()),
        }
        if insecure {
            opts = append(opts, otlptracegrpc.WithInsecure())
        }
        exp, err := otlptracegrpc.New(context.Background(), opts...)
        if err != nil {
            return fmt.Errorf("building tracing exporter: %w", err)
        }
        tpo = trace.WithBatcher(exp)
    case "jaeger":
        exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(config.GetTracingEndpoint())))
        if err != nil {
            return fmt.Errorf("building tracing exporter: %w", err)
        }
        tpo = trace.WithBatcher(exp)
    default:
        return fmt.Errorf("invalid tracing transport: %s", config.GetTracingTransport())
    }
    r, _ := resource.Merge(
        resource.Default(),
        resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ServiceName("GoToSocial"),
        ),
    )

    tp := trace.NewTracerProvider(
        tpo,
        trace.WithResource(r),
    )
    otel.SetTracerProvider(tp)
    propagator := propagation.NewCompositeTextMapPropagator(
        propagation.TraceContext{},
        propagation.Baggage{},
    )
    otel.SetTextMapPropagator(propagator)
    log.Hook(func(ctx context.Context, kvs []kv.Field) []kv.Field {
        span := oteltrace.SpanFromContext(ctx)
        if span != nil && span.SpanContext().HasTraceID() {
            return append(kvs, kv.Field{K: "traceID", V: span.SpanContext().TraceID().String()})
        }
        return kvs
    })
    return nil
}

// InstrumentGin is a middleware injecting tracing information based on the
// otelgin implementation found at
// https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/instrumentation/github.com/gin-gonic/gin/otelgin/gintrace.go
func InstrumentGin() gin.HandlerFunc {
    provider := otel.GetTracerProvider()
    tracer := provider.Tracer(
        tracerName,
        oteltrace.WithInstrumentationVersion(config.GetSoftwareVersion()),
    )
    propagator := otel.GetTextMapPropagator()
    return func(c *gin.Context) {
        c.Set(tracerKey, tracer)
        savedCtx := c.Request.Context()
        defer func() {
            c.Request = c.Request.WithContext(savedCtx)
        }()
        ctx := propagator.Extract(savedCtx, propagation.HeaderCarrier(c.Request.Header))
        opts := []oteltrace.SpanStartOption{
            oteltrace.WithAttributes(httpconv.ServerRequest(config.GetHost(), c.Request)...),
            oteltrace.WithSpanKind(oteltrace.SpanKindServer),
        }
        spanName := c.FullPath()
        if spanName == "" {
            spanName = fmt.Sprintf("HTTP %s route not found", c.Request.Method)
        } else {
            rAttr := semconv.HTTPRoute(spanName)
            opts = append(opts, oteltrace.WithAttributes(rAttr))
        }
        id := gtscontext.RequestID(c.Request.Context())
        if id != "" {
            opts = append(opts, oteltrace.WithAttributes(attribute.String("requestID", id)))
        }
        ctx, span := tracer.Start(ctx, spanName, opts...)
        defer span.End()

        // pass the span through the request context
        c.Request = c.Request.WithContext(ctx)

        // serve the request to the next middleware
        c.Next()

        status := c.Writer.Status()
        span.SetStatus(httpconv.ServerStatus(status))
        if status > 0 {
            span.SetAttributes(semconv.HTTPStatusCode(status))
        }
        if len(c.Errors) > 0 {
            span.SetAttributes(attribute.String("gin.errors", c.Errors.String()))
        }
    }
}

func InjectRequestID() gin.HandlerFunc {
    return func(c *gin.Context) {
        id := gtscontext.RequestID(c.Request.Context())
        if id != "" {
            span := oteltrace.SpanFromContext(c.Request.Context())
            span.SetAttributes(attribute.String("requestID", id))
        }
    }
}

func InstrumentBun() bun.QueryHook {
    return bunotel.NewQueryHook(
        bunotel.WithFormattedQueries(true),
    )
}
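Once Initialize has registered the provider above, any code path can start its own spans from the global provider. The short sketch below is an assumed usage example (not part of this commit) using only standard OpenTelemetry calls; without a registered provider it degrades to a no-op tracer.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	// otel.Tracer pulls from the provider registered by tracing.Initialize().
	tracer := otel.Tracer("example-instrumentation")
	ctx, span := tracer.Start(context.Background(), "demo-operation")
	defer span.End()

	// Pass ctx onwards so nested work (and the log hook above) can see the trace.
	fmt.Println("trace ID valid:", span.SpanContext().HasTraceID())
	_ = ctx
}
```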
@@ -151,6 +151,10 @@ EXPECT=$(cat <<"EOF"
    "syslog-protocol": "udp",
    "tls-certificate-chain": "",
    "tls-certificate-key": "",
    "tracing-enabled": false,
    "tracing-endpoint": "localhost:4317",
    "tracing-insecure": false,
    "tracing-transport": "grpc",
    "trusted-proxies": [
        "127.0.0.1/32",
        "docker.host.local"

@@ -240,6 +244,7 @@ GTS_SMTP_DISCLOSE_RECIPIENTS=true \
GTS_SYSLOG_ENABLED=true \
GTS_SYSLOG_PROTOCOL='udp' \
GTS_SYSLOG_ADDRESS='127.0.0.1:6969' \
GTS_TRACING_ENDPOINT='localhost:4317' \
GTS_ADVANCED_COOKIES_SAMESITE='strict' \
GTS_ADVANCED_RATE_LIMIT_REQUESTS=6969 \
GTS_ADVANCED_SENDER_MULTIPLIER=-1 \
@@ -113,6 +113,11 @@ var testDefaults = config.Configuration{
    SMTPFrom: "GoToSocial",
    SMTPDiscloseRecipients: false,

    TracingEnabled: false,
    TracingEndpoint: "localhost:4317",
    TracingTransport: "grpc",
    TracingInsecureTransport: true,

    SyslogEnabled: false,
    SyslogProtocol: "udp",
    SyslogAddress: "localhost:514",
vendor/github.com/cenkalti/backoff/v4/.gitignore (new file, 25 lines; generated, vendored)

@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

# IDEs
.idea/
vendor/github.com/cenkalti/backoff/v4/LICENSE (new file, 20 lines; generated, vendored)

@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014 Cenk Altı

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
32
vendor/github.com/cenkalti/backoff/v4/README.md
generated
vendored
Normal file
32
vendor/github.com/cenkalti/backoff/v4/README.md
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]

This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].

[Exponential backoff][exponential backoff wiki]
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
in order to gradually find an acceptable rate.
The retries exponentially increase and stop increasing when a certain threshold is met.

## Usage

Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.

Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.

## Contributing

* I would like to keep this library as small as possible.
* Please don't send a PR without opening an issue and discussing it first.
* If proposed change is not a common use case, I will probably not accept it.

[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
[travis]: https://travis-ci.org/cenkalti/backoff
[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master

[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff

[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
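The README above names the import path but stops short of a code sample. A minimal sketch of the typical retry loop (editor's illustration, not part of the vendored file; the failing operation and attempt counter are invented) could look like:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // pretend the first two calls fail
		}
		return nil
	}

	// Retry op with the default exponential backoff policy.
	if err := backoff.Retry(op, backoff.NewExponentialBackOff()); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
```

Retry blocks the calling goroutine between attempts, so long-lived retry loops are usually bounded with a context or a retry cap (see context.go and tries.go further down in this diff).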
66 vendor/github.com/cenkalti/backoff/v4/backoff.go generated vendored Normal file
@@ -0,0 +1,66 @@
// Package backoff implements backoff algorithms for retrying operations.
//
// Use Retry function for retrying operations that may fail.
// If Retry does not meet your needs,
// copy/paste the function into your project and modify as you wish.
//
// There is also Ticker type similar to time.Ticker.
// You can use it if you need to work with channels.
//
// See Examples section below for usage examples.
package backoff

import "time"

// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
	// NextBackOff returns the duration to wait before retrying the operation,
	// or backoff. Stop to indicate that no more retries should be made.
	//
	// Example usage:
	//
	// 	duration := backoff.NextBackOff();
	// 	if (duration == backoff.Stop) {
	// 		// Do not retry operation.
	// 	} else {
	// 		// Sleep for duration and retry operation.
	// 	}
	//
	NextBackOff() time.Duration

	// Reset to initial state.
	Reset()
}

// Stop indicates that no more retries should be made for use in NextBackOff().
const Stop time.Duration = -1

// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting, indefinitely.
type ZeroBackOff struct{}

func (b *ZeroBackOff) Reset() {}

func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }

// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
// NextBackOff(), meaning that the operation should never be retried.
type StopBackOff struct{}

func (b *StopBackOff) Reset() {}

func (b *StopBackOff) NextBackOff() time.Duration { return Stop }

// ConstantBackOff is a backoff policy that always returns the same backoff delay.
// This is in contrast to an exponential backoff policy,
// which returns a delay that grows longer as you call NextBackOff() over and over again.
type ConstantBackOff struct {
	Interval time.Duration
}

func (b *ConstantBackOff) Reset()                     {}
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }

func NewConstantBackOff(d time.Duration) *ConstantBackOff {
	return &ConstantBackOff{Interval: d}
}
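To make the BackOff contract above concrete, here is a small editor's sketch that drives one of the fixed policies by hand (the loop bound and interval are arbitrary, not from the vendored code):

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// ConstantBackOff satisfies the BackOff interface with a fixed delay.
	var b backoff.BackOff = backoff.NewConstantBackOff(200 * time.Millisecond)
	b.Reset()

	for i := 0; i < 3; i++ {
		d := b.NextBackOff()
		if d == backoff.Stop {
			break // the policy says: stop retrying
		}
		fmt.Println("sleeping for", d)
		time.Sleep(d)
	}
}
```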
62 vendor/github.com/cenkalti/backoff/v4/context.go generated vendored Normal file
@@ -0,0 +1,62 @@
package backoff

import (
	"context"
	"time"
)

// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface { // nolint: golint
	BackOff
	Context() context.Context
}

type backOffContext struct {
	BackOff
	ctx context.Context
}

// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
	if ctx == nil {
		panic("nil context")
	}

	if b, ok := b.(*backOffContext); ok {
		return &backOffContext{
			BackOff: b.BackOff,
			ctx:     ctx,
		}
	}

	return &backOffContext{
		BackOff: b,
		ctx:     ctx,
	}
}

func getContext(b BackOff) context.Context {
	if cb, ok := b.(BackOffContext); ok {
		return cb.Context()
	}
	if tb, ok := b.(*backOffTries); ok {
		return getContext(tb.delegate)
	}
	return context.Background()
}

func (b *backOffContext) Context() context.Context {
	return b.ctx
}

func (b *backOffContext) NextBackOff() time.Duration {
	select {
	case <-b.ctx.Done():
		return Stop
	default:
		return b.BackOff.NextBackOff()
	}
}
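A short editor's sketch of how WithContext is meant to be combined with Retry (the timeout value and the always-failing operation are made up for illustration):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Wrap the policy so retrying stops once ctx is canceled or times out.
	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)

	err := backoff.Retry(func() error {
		return errors.New("still failing")
	}, b)

	// Typically context.DeadlineExceeded once the timeout fires.
	fmt.Println("final error:", err)
}
```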
161 vendor/github.com/cenkalti/backoff/v4/exponential.go generated vendored Normal file
@@ -0,0 +1,161 @@
package backoff

import (
	"math/rand"
	"time"
)

/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.

NextBackOff() is calculated using the following formula:

	randomized interval =
	    RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])

In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.

For example, given the following parameters:

	RetryInterval = 2
	RandomizationFactor = 0.5
	Multiplier = 2

the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.

Note: MaxInterval caps the RetryInterval and not the randomized interval.

If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.

The elapsed time can be reset by calling Reset().

Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:

	Request #  RetryInterval (seconds)  Randomized Interval (seconds)

	 1          0.5                     [0.25,   0.75]
	 2          0.75                    [0.375,  1.125]
	 3          1.125                   [0.562,  1.687]
	 4          1.687                   [0.8435, 2.53]
	 5          2.53                    [1.265,  3.795]
	 6          3.795                   [1.897,  5.692]
	 7          5.692                   [2.846,  8.538]
	 8          8.538                   [4.269, 12.807]
	 9         12.807                   [6.403, 19.210]
	10         19.210                   backoff.Stop

Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
	InitialInterval     time.Duration
	RandomizationFactor float64
	Multiplier          float64
	MaxInterval         time.Duration
	// After MaxElapsedTime the ExponentialBackOff returns Stop.
	// It never stops if MaxElapsedTime == 0.
	MaxElapsedTime time.Duration
	Stop           time.Duration
	Clock          Clock

	currentInterval time.Duration
	startTime       time.Time
}

// Clock is an interface that returns current time for BackOff.
type Clock interface {
	Now() time.Time
}

// Default values for ExponentialBackOff.
const (
	DefaultInitialInterval     = 500 * time.Millisecond
	DefaultRandomizationFactor = 0.5
	DefaultMultiplier          = 1.5
	DefaultMaxInterval         = 60 * time.Second
	DefaultMaxElapsedTime      = 15 * time.Minute
)

// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
	b := &ExponentialBackOff{
		InitialInterval:     DefaultInitialInterval,
		RandomizationFactor: DefaultRandomizationFactor,
		Multiplier:          DefaultMultiplier,
		MaxInterval:         DefaultMaxInterval,
		MaxElapsedTime:      DefaultMaxElapsedTime,
		Stop:                Stop,
		Clock:               SystemClock,
	}
	b.Reset()
	return b
}

type systemClock struct{}

func (t systemClock) Now() time.Time {
	return time.Now()
}

// SystemClock implements Clock interface that uses time.Now().
var SystemClock = systemClock{}

// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
	b.currentInterval = b.InitialInterval
	b.startTime = b.Clock.Now()
}

// NextBackOff calculates the next backoff interval using the formula:
//
//	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
	// Make sure we have not gone over the maximum elapsed time.
	elapsed := b.GetElapsedTime()
	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
	b.incrementCurrentInterval()
	if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
		return b.Stop
	}
	return next
}

// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
// is created and is reset when Reset() is called.
//
// The elapsed time is computed using time.Now().UnixNano(). It is
// safe to call even while the backoff policy is used by a running
// ticker.
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
	return b.Clock.Now().Sub(b.startTime)
}

// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
	// Check for overflow, if overflow is detected set the current interval to the max interval.
	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
		b.currentInterval = b.MaxInterval
	} else {
		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
	}
}

// Returns a random value from the following interval:
//
//	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
	if randomizationFactor == 0 {
		return currentInterval // make sure no randomness is used when randomizationFactor is 0.
	}
	var delta = randomizationFactor * float64(currentInterval)
	var minInterval = float64(currentInterval) - delta
	var maxInterval = float64(currentInterval) + delta

	// Get a random value from the range [minInterval, maxInterval].
	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
	// we want a 33% chance for selecting either 1, 2 or 3.
	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
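As an editor's illustration of the interval formula documented above, the sketch below tunes the exported fields and prints a few successive NextBackOff values (the concrete durations chosen here are arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 100 * time.Millisecond
	b.RandomizationFactor = 0.5
	b.Multiplier = 2
	b.MaxInterval = 2 * time.Second
	b.MaxElapsedTime = 10 * time.Second
	b.Reset() // restart the elapsed-time clock after changing settings

	for i := 0; i < 5; i++ {
		// Each value is randomized ±50% around an interval that roughly doubles per call.
		fmt.Println(b.NextBackOff())
	}
}
```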
146 vendor/github.com/cenkalti/backoff/v4/retry.go generated vendored Normal file
@@ -0,0 +1,146 @@
package backoff

import (
	"errors"
	"time"
)

// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
// The operation will be retried using a backoff policy if it returns an error.
type OperationWithData[T any] func() (T, error)

// An Operation is executing by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error

func (o Operation) withEmptyData() OperationWithData[struct{}] {
	return func() (struct{}, error) {
		return struct{}{}, o()
	}
}

// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy stated to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)

// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error {
	return RetryNotify(o, b, nil)
}

// RetryWithData is like Retry but returns data in the response too.
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
	return RetryNotifyWithData(o, b, nil)
}

// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
	return RetryNotifyWithTimer(operation, b, notify, nil)
}

// RetryNotifyWithData is like RetryNotify but returns data in the response too.
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
	return doRetryNotify(operation, b, notify, nil)
}

// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
	_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
	return err
}

// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
	return doRetryNotify(operation, b, notify, t)
}

func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
	var (
		err  error
		next time.Duration
		res  T
	)
	if t == nil {
		t = &defaultTimer{}
	}

	defer func() {
		t.Stop()
	}()

	ctx := getContext(b)

	b.Reset()
	for {
		res, err = operation()
		if err == nil {
			return res, nil
		}

		var permanent *PermanentError
		if errors.As(err, &permanent) {
			return res, permanent.Err
		}

		if next = b.NextBackOff(); next == Stop {
			if cerr := ctx.Err(); cerr != nil {
				return res, cerr
			}

			return res, err
		}

		if notify != nil {
			notify(err, next)
		}

		t.Start(next)

		select {
		case <-ctx.Done():
			return res, ctx.Err()
		case <-t.C():
		}
	}
}

// PermanentError signals that the operation should not be retried.
type PermanentError struct {
	Err error
}

func (e *PermanentError) Error() string {
	return e.Err.Error()
}

func (e *PermanentError) Unwrap() error {
	return e.Err
}

func (e *PermanentError) Is(target error) bool {
	_, ok := target.(*PermanentError)
	return ok
}

// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
	if err == nil {
		return nil
	}
	return &PermanentError{
		Err: err,
	}
}
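An editor's sketch combining RetryNotifyWithData with Permanent, showing that a permanently-wrapped error short-circuits the retry loop (the fetch function and its error are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func fetch() (string, error) {
	// A 4xx-style failure is not worth retrying: wrap it in Permanent.
	return "", backoff.Permanent(errors.New("bad request"))
}

func main() {
	notify := func(err error, wait time.Duration) {
		fmt.Printf("retrying in %s after error: %v\n", wait, err)
	}

	body, err := backoff.RetryNotifyWithData(fetch, backoff.NewExponentialBackOff(), notify)

	// Returns immediately with the underlying error; no retries, no notifications.
	fmt.Printf("body=%q err=%v\n", body, err)
}
```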
97 vendor/github.com/cenkalti/backoff/v4/ticker.go generated vendored Normal file
@@ -0,0 +1,97 @@
package backoff

import (
	"context"
	"sync"
	"time"
)

// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
	C        <-chan time.Time
	c        chan time.Time
	b        BackOff
	ctx      context.Context
	timer    Timer
	stop     chan struct{}
	stopOnce sync.Once
}

// NewTicker returns a new Ticker containing a channel that will send
// the time at times specified by the BackOff argument. Ticker is
// guaranteed to tick at least once. The channel is closed when Stop
// method is called or BackOff stops. It is not safe to manipulate the
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
	return NewTickerWithTimer(b, &defaultTimer{})
}

// NewTickerWithTimer returns a new Ticker with a custom timer.
// A default timer that uses system timer is used when nil is passed.
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
	if timer == nil {
		timer = &defaultTimer{}
	}
	c := make(chan time.Time)
	t := &Ticker{
		C:     c,
		c:     c,
		b:     b,
		ctx:   getContext(b),
		timer: timer,
		stop:  make(chan struct{}),
	}
	t.b.Reset()
	go t.run()
	return t
}

// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
	t.stopOnce.Do(func() { close(t.stop) })
}

func (t *Ticker) run() {
	c := t.c
	defer close(c)

	// Ticker is guaranteed to tick at least once.
	afterC := t.send(time.Now())

	for {
		if afterC == nil {
			return
		}

		select {
		case tick := <-afterC:
			afterC = t.send(tick)
		case <-t.stop:
			t.c = nil // Prevent future ticks from being sent to the channel.
			return
		case <-t.ctx.Done():
			return
		}
	}
}

func (t *Ticker) send(tick time.Time) <-chan time.Time {
	select {
	case t.c <- tick:
	case <-t.stop:
		return nil
	}

	next := t.b.NextBackOff()
	if next == Stop {
		t.Stop()
		return nil
	}

	t.timer.Start(next)
	return t.timer.C()
}
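A brief editor's sketch of the channel-based style that Ticker enables (the doSomething operation and the retry cap are invented for illustration):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func doSomething() error { return errors.New("not yet") }

func main() {
	// Tick according to an exponential policy, capped at 3 retries.
	ticker := backoff.NewTicker(backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3))
	defer ticker.Stop()

	var err error
	for range ticker.C {
		if err = doSomething(); err == nil {
			ticker.Stop()
			break
		}
		fmt.Println("attempt failed:", err)
	}
	// The channel closes once the policy returns Stop, ending the loop.
	fmt.Println("final error:", err)
}
```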
35 vendor/github.com/cenkalti/backoff/v4/timer.go generated vendored Normal file
@@ -0,0 +1,35 @@
package backoff

import "time"

type Timer interface {
	Start(duration time.Duration)
	Stop()
	C() <-chan time.Time
}

// defaultTimer implements Timer interface using time.Timer
type defaultTimer struct {
	timer *time.Timer
}

// C returns the timers channel which receives the current time when the timer fires.
func (t *defaultTimer) C() <-chan time.Time {
	return t.timer.C
}

// Start starts the timer to fire after the given duration
func (t *defaultTimer) Start(duration time.Duration) {
	if t.timer == nil {
		t.timer = time.NewTimer(duration)
	} else {
		t.timer.Reset(duration)
	}
}

// Stop is called when the timer is not used anymore and resources may be freed.
func (t *defaultTimer) Stop() {
	if t.timer != nil {
		t.timer.Stop()
	}
}
38 vendor/github.com/cenkalti/backoff/v4/tries.go generated vendored Normal file
@@ -0,0 +1,38 @@
package backoff

import "time"

/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called

Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
	return &backOffTries{delegate: b, maxTries: max}
}

type backOffTries struct {
	delegate BackOff
	maxTries uint64
	numTries uint64
}

func (b *backOffTries) NextBackOff() time.Duration {
	if b.maxTries == 0 {
		return Stop
	}
	if b.maxTries > 0 {
		if b.maxTries <= b.numTries {
			return Stop
		}
		b.numTries++
	}
	return b.delegate.NextBackOff()
}

func (b *backOffTries) Reset() {
	b.numTries = 0
	b.delegate.Reset()
}
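An editor's sketch of WithMaxRetries wrapped around the default exponential policy; the retry cap of 5 and the always-failing operation are arbitrary:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Give up after 5 retries, whatever the underlying policy would otherwise allow.
	policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5)

	attempts := 0
	err := backoff.Retry(func() error {
		attempts++
		return errors.New("still failing")
	}, policy)

	// The operation runs once plus up to 5 retries before Retry returns the last error.
	fmt.Println(attempts, err)
}
```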
29 vendor/github.com/go-logr/logr/.golangci.yaml generated vendored Normal file
@@ -0,0 +1,29 @@
run:
  timeout: 1m
  tests: true

linters:
  disable-all: true
  enable:
    - asciicheck
    - deadcode
    - errcheck
    - forcetypeassert
    - gocritic
    - gofmt
    - goimports
    - gosimple
    - govet
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - structcheck
    - typecheck
    - unused
    - varcheck

issues:
  exclude-use-default: false
  max-issues-per-linter: 0
  max-same-issues: 10
6 vendor/github.com/go-logr/logr/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,6 @@
# CHANGELOG

## v1.0.0-rc1

This is the first logged release. Major changes (including breaking changes)
have occurred since earlier tags.
17 vendor/github.com/go-logr/logr/CONTRIBUTING.md generated vendored Normal file
@@ -0,0 +1,17 @@
# Contributing

Logr is open to pull-requests, provided they fit within the intended scope of
the project. Specifically, this library aims to be VERY small and minimalist,
with no external dependencies.

## Compatibility

This project intends to follow [semantic versioning](http://semver.org) and
is very strict about compatibility. Any proposed changes MUST follow those
rules.

## Performance

As a logging library, logr must be as light-weight as possible. Any proposed
code change must include results of running the [benchmark](./benchmark)
before and after the change.
201 vendor/github.com/go-logr/logr/LICENSE generated vendored Normal file
@@ -0,0 +1,201 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
282 vendor/github.com/go-logr/logr/README.md generated vendored Normal file
@@ -0,0 +1,282 @@
# A minimal logging API for Go
|
||||
|
||||
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
|
||||
|
||||
logr offers an(other) opinion on how Go programs and libraries can do logging
|
||||
without becoming coupled to a particular logging implementation. This is not
|
||||
an implementation of logging - it is an API. In fact it is two APIs with two
|
||||
different sets of users.
|
||||
|
||||
The `Logger` type is intended for application and library authors. It provides
|
||||
a relatively small API which can be used everywhere you want to emit logs. It
|
||||
defers the actual act of writing logs (to files, to stdout, or whatever) to the
|
||||
`LogSink` interface.
|
||||
|
||||
The `LogSink` interface is intended for logging library implementers. It is a
|
||||
pure interface which can be implemented by logging frameworks to provide the actual logging
|
||||
functionality.
|
||||
|
||||
This decoupling allows application and library developers to write code in
|
||||
terms of `logr.Logger` (which has very low dependency fan-out) while the
|
||||
implementation of logging is managed "up stack" (e.g. in or near `main()`.)
|
||||
Application developers can then switch out implementations as necessary.
|
||||
|
||||
Many people assert that libraries should not be logging, and as such efforts
|
||||
like this are pointless. Those people are welcome to convince the authors of
|
||||
the tens-of-thousands of libraries that *DO* write logs that they are all
|
||||
wrong. In the meantime, logr takes a more practical approach.
|
||||
|
||||
## Typical usage
|
||||
|
||||
Somewhere, early in an application's life, it will make a decision about which
|
||||
logging library (implementation) it actually wants to use. Something like:
|
||||
|
||||
```
|
||||
func main() {
|
||||
// ... other setup code ...
|
||||
|
||||
// Create the "root" logger. We have chosen the "logimpl" implementation,
|
||||
// which takes some initial parameters and returns a logr.Logger.
|
||||
logger := logimpl.New(param1, param2)
|
||||
|
||||
// ... other setup code ...
|
||||
```
|
||||
|
||||
Most apps will call into other libraries, create structures to govern the flow,
|
||||
etc. The `logr.Logger` object can be passed to these other libraries, stored
|
||||
in structs, or even used as a package-global variable, if needed. For example:
|
||||
|
||||
```
|
||||
app := createTheAppObject(logger)
|
||||
app.Run()
|
||||
```
|
||||
|
||||
Outside of this early setup, no other packages need to know about the choice of
|
||||
implementation. They write logs in terms of the `logr.Logger` that they
|
||||
received:
|
||||
|
||||
```
|
||||
type appObject struct {
|
||||
// ... other fields ...
|
||||
logger logr.Logger
|
||||
// ... other fields ...
|
||||
}
|
||||
|
||||
func (app *appObject) Run() {
|
||||
app.logger.Info("starting up", "timestamp", time.Now())
|
||||
|
||||
// ... app code ...
|
||||
```
|
||||
|
||||
## Background
|
||||
|
||||
If the Go standard library had defined an interface for logging, this project
|
||||
probably would not be needed. Alas, here we are.
|
||||
|
||||
### Inspiration
|
||||
|
||||
Before you consider this package, please read [this blog post by the
|
||||
inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
|
||||
he has to say, and it largely aligns with our own experiences.
|
||||
|
||||
### Differences from Dave's ideas
|
||||
|
||||
The main differences are:
|
||||
|
||||
1. Dave basically proposes doing away with the notion of a logging API in favor
|
||||
of `fmt.Printf()`. We disagree, especially when you consider things like output
|
||||
locations, timestamps, file and line decorations, and structured logging. This
|
||||
package restricts the logging API to just 2 types of logs: info and error.
|
||||
|
||||
Info logs are things you want to tell the user which are not errors. Error
|
||||
logs are, well, errors. If your code receives an `error` from a subordinate
|
||||
function call and is logging that `error` *and not returning it*, use error
|
||||
logs.
|
||||
|
||||
2. Verbosity-levels on info logs. This gives developers a chance to indicate
|
||||
arbitrary grades of importance for info logs, without assigning names with
|
||||
semantic meaning such as "warning", "trace", and "debug." Superficially this
|
||||
may feel very similar, but the primary difference is the lack of semantics.
|
||||
Because verbosity is a numerical value, it's safe to assume that an app running
|
||||
with higher verbosity means more (and less important) logs will be generated.
|
||||
|
||||
## Implementations (non-exhaustive)
|
||||
|
||||
There are implementations for the following logging libraries:
|
||||
|
||||
- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
|
||||
- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
|
||||
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
|
||||
- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
|
||||
- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
|
||||
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
|
||||
- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
|
||||
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
|
||||
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
|
||||
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
|
||||
- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
|
||||
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
|
||||
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
|
||||
|
||||
## FAQ
|
||||
|
||||
### Conceptual
|
||||
|
||||
#### Why structured logging?
|
||||
|
||||
- **Structured logs are more easily queryable**: Since you've got
|
||||
key-value pairs, it's much easier to query your structured logs for
|
||||
particular values by filtering on the contents of a particular key --
|
||||
think searching request logs for error codes, Kubernetes reconcilers for
|
||||
the name and namespace of the reconciled object, etc.
|
||||
|
||||
- **Structured logging makes it easier to have cross-referenceable logs**:
|
||||
Similarly to searchability, if you maintain conventions around your
|
||||
keys, it becomes easy to gather all log lines related to a particular
|
||||
concept.
|
||||
|
||||
- **Structured logs allow better dimensions of filtering**: if you have
|
||||
structure to your logs, you've got more precise control over how much
|
||||
information is logged -- you might choose in a particular configuration
|
||||
to log certain keys but not others, only log lines where a certain key
|
||||
matches a certain value, etc., instead of just having v-levels and names
|
||||
to key off of.
|
||||
|
||||
- **Structured logs better represent structured data**: sometimes, the
|
||||
data that you want to log is inherently structured (think tuple-link
|
||||
objects.) Structured logs allow you to preserve that structure when
|
||||
outputting.
|
||||
|
||||
#### Why V-levels?
|
||||
|
||||
**V-levels give operators an easy way to control the chattiness of log
|
||||
operations**. V-levels provide a way for a given package to distinguish
|
||||
the relative importance or verbosity of a given log message. Then, if
|
||||
a particular logger or package is logging too many messages, the user
|
||||
of the package can simply change the v-levels for that library.
|
||||
|
||||
#### Why not named levels, like Info/Warning/Error?
|
||||
|
||||
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
|
||||
from Dave's ideas](#differences-from-daves-ideas).
|
||||
|
||||
#### Why not allow format strings, too?
|
||||
|
||||
**Format strings negate many of the benefits of structured logs**:
|
||||
|
||||
- They're not easily searchable without resorting to fuzzy searching,
|
||||
regular expressions, etc.
|
||||
|
||||
- They don't store structured data well, since contents are flattened into
|
||||
a string.
|
||||
|
||||
- They're not cross-referenceable.
|
||||
|
||||
- They don't compress easily, since the message is not constant.
|
||||
|
||||
(Unless you turn positional parameters into key-value pairs with numerical
|
||||
keys, at which point you've gotten key-value logging with meaningless
|
||||
keys.)
|
||||
|
||||
### Practical
|
||||
|
||||
#### Why key-value pairs, and not a map?
|
||||
|
||||
Key-value pairs are *much* easier to optimize, especially around
|
||||
allocations. Zap (a structured logger that inspired logr's interface) has
|
||||
[performance measurements](https://github.com/uber-go/zap#performance)
|
||||
that show this quite nicely.
|
||||
|
||||
While the interface ends up being a little less obvious, you get
|
||||
potentially better performance, plus avoid making users type
|
||||
`map[string]string{}` every time they want to log.
|
||||
|
||||
#### What if my V-levels differ between libraries?
|
||||
|
||||
That's fine. Control your V-levels on a per-logger basis, and use the
|
||||
`WithName` method to pass different loggers to different libraries.
|
||||
|
||||
Generally, you should take care to ensure that you have relatively
|
||||
consistent V-levels within a given logger, however, as this makes deciding
|
||||
on what verbosity of logs to request easier.
|
||||
|
||||
#### But I really want to use a format string!
|
||||
|
||||
That's not actually a question. Assuming your question is "how do
|
||||
I convert my mental model of logging with format strings to logging with
|
||||
constant messages":
|
||||
|
||||
1. Figure out what the error actually is, as you'd write in a TL;DR style,
|
||||
and use that as a message.
|
||||
|
||||
2. For every place you'd write a format specifier, look to the word before
|
||||
it, and add that as a key value pair.
|
||||
|
||||
For instance, consider the following examples (all taken from spots in the
|
||||
Kubernetes codebase):
|
||||
|
||||
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
|
||||
responseCode, err)` becomes `logger.Error(err, "client returned an
|
||||
error", "code", responseCode)`
|
||||
|
||||
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
|
||||
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
|
||||
response when requesting url", "attempt", retries, "after
|
||||
seconds", seconds, "url", url)`
|
||||
|
||||
If you *really* must use a format string, use it in a key's value, and
|
||||
call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
|
||||
reflect over type %T")` becomes `logger.Info("unable to reflect over
|
||||
type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
|
||||
this is necessary should be few and far between.
|
||||
|
||||
#### How do I choose my V-levels?
|
||||
|
||||
This is basically the only hard constraint: increase V-levels to denote
|
||||
more verbose or more debug-y logs.
|
||||
|
||||
Otherwise, you can start out with `0` as "you always want to see this",
|
||||
`1` as "common logging that you might *possibly* want to turn off", and
|
||||
`10` as "I would like to performance-test your log collection stack."
|
||||
|
||||
Then gradually choose levels in between as you need them, working your way
|
||||
down from 10 (for debug and trace style logs) and up from 1 (for chattier
|
||||
info-type logs.)
|
||||
|
||||
#### How do I choose my keys?
|
||||
|
||||
Keys are fairly flexible, and can hold more or less any string
|
||||
value. For best compatibility with implementations and consistency
|
||||
with existing code in other projects, there are a few conventions you
|
||||
should consider.
|
||||
|
||||
- Make your keys human-readable.
|
||||
- Constant keys are generally a good idea.
|
||||
- Be consistent across your codebase.
|
||||
- Keys should naturally match parts of the message string.
|
||||
- Use lower case for simple keys and
|
||||
[lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
|
||||
more complex ones. Kubernetes is one example of a project that has
|
||||
[adopted that
|
||||
convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
|
||||
|
||||
While key names are mostly unrestricted (and spaces are acceptable),
|
||||
it's generally a good idea to stick to printable ascii characters, or at
|
||||
least match the general character set of your log lines.
|
||||
|
||||
#### Why should keys be constant values?
|
||||
|
||||
The point of structured logging is to make later log processing easier. Your
|
||||
keys are, effectively, the schema of each log message. If you use different
|
||||
keys across instances of the same log line, you will make your structured logs
|
||||
much harder to use. `Sprintf()` is for values, not for keys!
|
||||
|
||||
#### Why is this not a pure interface?
|
||||
|
||||
The Logger type is implemented as a struct in order to allow the Go compiler to
|
||||
optimize things like high-V `Info` logs that are not triggered. Not all of
|
||||
these implementations are implemented yet, but this structure was suggested as
|
||||
a way to ensure they *can* be implemented. All of the real work is behind the
|
||||
`LogSink` interface.
|
||||
|
||||
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
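To complement the README's usage discussion above, here is an editor's sketch that wires a logr.Logger to a trivial write function via the funcr package vendored further down in this diff (the logger name, key-value pairs, and verbosity level are arbitrary):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr turns an arbitrary "write" function into a logr.Logger.
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 1})

	logger = logger.WithName("example").WithValues("component", "demo")
	logger.Info("starting up", "port", 8080)
	logger.V(1).Info("verbose detail", "step", 1)
	logger.Error(errors.New("boom"), "operation failed", "retryable", false)
}
```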
54 vendor/github.com/go-logr/logr/discard.go generated vendored Normal file
@@ -0,0 +1,54 @@
/*
Copyright 2020 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package logr

// Discard returns a Logger that discards all messages logged to it. It can be
// used whenever the caller is not interested in the logs. Logger instances
// produced by this function always compare as equal.
func Discard() Logger {
	return Logger{
		level: 0,
		sink:  discardLogSink{},
	}
}

// discardLogSink is a LogSink that discards all messages.
type discardLogSink struct{}

// Verify that it actually implements the interface
var _ LogSink = discardLogSink{}

func (l discardLogSink) Init(RuntimeInfo) {
}

func (l discardLogSink) Enabled(int) bool {
	return false
}

func (l discardLogSink) Info(int, string, ...interface{}) {
}

func (l discardLogSink) Error(error, string, ...interface{}) {
}

func (l discardLogSink) WithValues(...interface{}) LogSink {
	return l
}

func (l discardLogSink) WithName(string) LogSink {
	return l
}
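An editor's sketch of the intended use of Discard(): handing a no-op logger to code that requires a logr.Logger (doWork is a hypothetical caller, not part of the vendored package):

```go
package main

import (
	"github.com/go-logr/logr"
)

// doWork accepts a logr.Logger; callers that don't care about logs
// can hand it logr.Discard().
func doWork(log logr.Logger) {
	log.Info("doing work") // silently dropped by the discard sink
}

func main() {
	doWork(logr.Discard())
}
```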
787 vendor/github.com/go-logr/logr/funcr/funcr.go generated vendored Normal file
@@ -0,0 +1,787 @@
/*
|
||||
Copyright 2021 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package funcr implements formatting of structured log messages and
|
||||
// optionally captures the call site and timestamp.
|
||||
//
|
||||
// The simplest way to use it is via its implementation of a
|
||||
// github.com/go-logr/logr.LogSink with output through an arbitrary
|
||||
// "write" function. See New and NewJSON for details.
|
||||
//
|
||||
// Custom LogSinks
|
||||
//
|
||||
// For users who need more control, a funcr.Formatter can be embedded inside
|
||||
// your own custom LogSink implementation. This is useful when the LogSink
|
||||
// needs to implement additional methods, for example.
|
||||
//
|
||||
// Formatting
|
||||
//
|
||||
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
|
||||
// values which are being logged. When rendering a struct, funcr will use Go's
|
||||
// standard JSON tags (all except "string").
|
||||
package funcr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// New returns a logr.Logger which is implemented by an arbitrary function.
|
||||
func New(fn func(prefix, args string), opts Options) logr.Logger {
|
||||
return logr.New(newSink(fn, NewFormatter(opts)))
|
||||
}
|
||||
|
||||
// NewJSON returns a logr.Logger which is implemented by an arbitrary function
|
||||
// and produces JSON output.
|
||||
func NewJSON(fn func(obj string), opts Options) logr.Logger {
|
||||
fnWrapper := func(_, obj string) {
|
||||
fn(obj)
|
||||
}
|
||||
return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
|
||||
}
|
||||
|
||||
// Underlier exposes access to the underlying logging function. Since
|
||||
// callers only have a logr.Logger, they have to know which
|
||||
// implementation is in use, so this interface is less of an
|
||||
// abstraction and more of a way to test type conversion.
|
||||
type Underlier interface {
|
||||
GetUnderlying() func(prefix, args string)
|
||||
}
|
||||
|
||||
func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
|
||||
l := &fnlogger{
|
||||
Formatter: formatter,
|
||||
write: fn,
|
||||
}
|
||||
// For skipping fnlogger.Info and fnlogger.Error.
|
||||
l.Formatter.AddCallDepth(1)
|
||||
return l
|
||||
}
|
||||
|
||||
// Options carries parameters which influence the way logs are generated.
type Options struct {
	// LogCaller tells funcr to add a "caller" key to some or all log lines.
	// This has some overhead, so some users might not want it.
	LogCaller MessageClass

	// LogCallerFunc tells funcr to also log the calling function name. This
	// has no effect if caller logging is not enabled (see Options.LogCaller).
	LogCallerFunc bool

	// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
	// overhead, so some users might not want it.
	LogTimestamp bool

	// TimestampFormat tells funcr how to render timestamps when LogTimestamp
	// is enabled. If not specified, a default format will be used. For more
	// details, see docs for Go's time.Layout.
	TimestampFormat string

	// Verbosity tells funcr which V logs to produce. Higher values enable
	// more logs. Info logs at or below this level will be written, while logs
	// above this level will be discarded.
	Verbosity int

	// RenderBuiltinsHook allows users to mutate the list of key-value pairs
	// while a log line is being rendered. The kvList argument follows logr
	// conventions - each pair of slice elements is comprised of a string key
	// and an arbitrary value (verified and sanitized before calling this
	// hook). The value returned must follow the same conventions. This hook
	// can be used to audit or modify logged data. For example, you might want
	// to prefix all of funcr's built-in keys with some string. This hook is
	// only called for built-in (provided by funcr itself) key-value pairs.
	// Equivalent hooks are offered for key-value pairs saved via
	// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
	// for user-provided pairs (see RenderArgsHook).
	RenderBuiltinsHook func(kvList []interface{}) []interface{}

	// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
	// only called for key-value pairs saved via logr.Logger.WithValues. See
	// RenderBuiltinsHook for more details.
	RenderValuesHook func(kvList []interface{}) []interface{}

	// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
	// called for key-value pairs passed directly to Info and Error. See
	// RenderBuiltinsHook for more details.
	RenderArgsHook func(kvList []interface{}) []interface{}

	// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
	// that contains a struct, etc.) it may log. Every time it finds a struct,
	// slice, array, or map the depth is increased by one. When the maximum is
	// reached, the value will be converted to a string indicating that the max
	// depth has been exceeded. If this field is not specified, a default
	// value will be used.
	MaxLogDepth int
}

// MessageClass indicates which category or categories of messages to consider.
type MessageClass int

const (
	// None ignores all message classes.
	None MessageClass = iota
	// All considers all message classes.
	All
	// Info only considers info messages.
	Info
	// Error only considers error messages.
	Error
)

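// Example (editorial sketch, not part of the upstream funcr package): how a
// caller could construct a logger with custom Options. The hook shown prefixes
// funcr's built-in keys; the "funcr." prefix and the function name are
// arbitrary choices for the example.
func exampleLoggerWithOptions() logr.Logger {
	write := func(prefix, args string) {
		if prefix != "" {
			fmt.Println(prefix, args)
		} else {
			fmt.Println(args)
		}
	}
	return New(write, Options{
		LogCaller:    All,  // add "caller" to every line
		LogTimestamp: true, // add "ts" to every line
		Verbosity:    1,    // write V(0) and V(1) info logs
		RenderBuiltinsHook: func(kvList []interface{}) []interface{} {
			for i := 0; i < len(kvList); i += 2 {
				if k, ok := kvList[i].(string); ok {
					kvList[i] = "funcr." + k
				}
			}
			return kvList
		},
	})
}
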
// fnlogger inherits some of its LogSink implementation from Formatter
// and just needs to add some glue code.
type fnlogger struct {
	Formatter
	write func(prefix, args string)
}

func (l fnlogger) WithName(name string) logr.LogSink {
	l.Formatter.AddName(name)
	return &l
}

func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
	l.Formatter.AddValues(kvList)
	return &l
}

func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
	l.Formatter.AddCallDepth(depth)
	return &l
}

func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
	prefix, args := l.FormatInfo(level, msg, kvList)
	l.write(prefix, args)
}

func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
	prefix, args := l.FormatError(err, msg, kvList)
	l.write(prefix, args)
}

func (l fnlogger) GetUnderlying() func(prefix, args string) {
	return l.write
}

// Assert conformance to the interfaces.
var _ logr.LogSink = &fnlogger{}
var _ logr.CallDepthLogSink = &fnlogger{}
var _ Underlier = &fnlogger{}

// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
func NewFormatter(opts Options) Formatter {
	return newFormatter(opts, outputKeyValue)
}

// NewFormatterJSON constructs a Formatter which emits strict JSON.
func NewFormatterJSON(opts Options) Formatter {
	return newFormatter(opts, outputJSON)
}

// Defaults for Options.
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
const defaultMaxLogDepth = 16

func newFormatter(opts Options, outfmt outputFormat) Formatter {
	if opts.TimestampFormat == "" {
		opts.TimestampFormat = defaultTimestampFormat
	}
	if opts.MaxLogDepth == 0 {
		opts.MaxLogDepth = defaultMaxLogDepth
	}
	f := Formatter{
		outputFormat: outfmt,
		prefix:       "",
		values:       nil,
		depth:        0,
		opts:         opts,
	}
	return f
}

// Formatter is an opaque struct which can be embedded in a LogSink
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
	outputFormat outputFormat
	prefix       string
	values       []interface{}
	valuesStr    string
	depth        int
	opts         Options
}

// outputFormat indicates which outputFormat to use.
type outputFormat int

const (
	// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
	outputKeyValue outputFormat = iota
	// outputJSON emits strict JSON.
	outputJSON
)

// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []interface{}

// render produces a log line, ready to use.
func (f Formatter) render(builtins, args []interface{}) string {
	// Empirically bytes.Buffer is faster than strings.Builder for this.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	if f.outputFormat == outputJSON {
		buf.WriteByte('{')
	}
	vals := builtins
	if hook := f.opts.RenderBuiltinsHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}
	f.flatten(buf, vals, false, false) // keys are ours, no need to escape
	continuing := len(builtins) > 0
	if len(f.valuesStr) > 0 {
		if continuing {
			if f.outputFormat == outputJSON {
				buf.WriteByte(',')
			} else {
				buf.WriteByte(' ')
			}
		}
		continuing = true
		buf.WriteString(f.valuesStr)
	}
	vals = args
	if hook := f.opts.RenderArgsHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}
	f.flatten(buf, vals, continuing, true) // escape user-provided keys
	if f.outputFormat == outputJSON {
		buf.WriteByte('}')
	}
	return buf.String()
}

// flatten renders a list of key-value pairs into a buffer. If continuing is
// true, it assumes that the buffer has previous values and will emit a
// separator (which depends on the output format) before the first pair it
// writes. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
	// This logic overlaps with sanitize() but saves one type-cast per key,
	// which can be measurable.
	if len(kvList)%2 != 0 {
		kvList = append(kvList, noValue)
	}
	for i := 0; i < len(kvList); i += 2 {
		k, ok := kvList[i].(string)
		if !ok {
			k = f.nonStringKey(kvList[i])
			kvList[i] = k
		}
		v := kvList[i+1]

		if i > 0 || continuing {
			if f.outputFormat == outputJSON {
				buf.WriteByte(',')
			} else {
				// In theory the format could be something we don't understand. In
				// practice, we control it, so it won't be.
				buf.WriteByte(' ')
			}
		}

		if escapeKeys {
			buf.WriteString(prettyString(k))
		} else {
			// this is faster
			buf.WriteByte('"')
			buf.WriteString(k)
			buf.WriteByte('"')
		}
		if f.outputFormat == outputJSON {
			buf.WriteByte(':')
		} else {
			buf.WriteByte('=')
		}
		buf.WriteString(f.pretty(v))
	}
	return kvList
}

func (f Formatter) pretty(value interface{}) string {
	return f.prettyWithFlags(value, 0, 0)
}

const (
	flagRawStruct = 0x1 // do not print braces on structs
)

// TODO: This is not fast. Most of the overhead goes here.
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
	if depth > f.opts.MaxLogDepth {
		return `"<max-log-depth-exceeded>"`
	}

	// Handle types that take full control of logging.
	if v, ok := value.(logr.Marshaler); ok {
		// Replace the value with what the type wants to get logged.
		// That then gets handled below via reflection.
		value = invokeMarshaler(v)
	}

	// Handle types that want to format themselves.
	switch v := value.(type) {
	case fmt.Stringer:
		value = invokeStringer(v)
	case error:
		value = invokeError(v)
	}

	// Handling the most common types without reflect is a small perf win.
	switch v := value.(type) {
	case bool:
		return strconv.FormatBool(v)
	case string:
		return prettyString(v)
	case int:
		return strconv.FormatInt(int64(v), 10)
	case int8:
		return strconv.FormatInt(int64(v), 10)
	case int16:
		return strconv.FormatInt(int64(v), 10)
	case int32:
		return strconv.FormatInt(int64(v), 10)
	case int64:
		return strconv.FormatInt(int64(v), 10)
	case uint:
		return strconv.FormatUint(uint64(v), 10)
	case uint8:
		return strconv.FormatUint(uint64(v), 10)
	case uint16:
		return strconv.FormatUint(uint64(v), 10)
	case uint32:
		return strconv.FormatUint(uint64(v), 10)
	case uint64:
		return strconv.FormatUint(v, 10)
	case uintptr:
		return strconv.FormatUint(uint64(v), 10)
	case float32:
		return strconv.FormatFloat(float64(v), 'f', -1, 32)
	case float64:
		return strconv.FormatFloat(v, 'f', -1, 64)
	case complex64:
		return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
	case complex128:
		return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
	case PseudoStruct:
		buf := bytes.NewBuffer(make([]byte, 0, 1024))
		v = f.sanitize(v)
		if flags&flagRawStruct == 0 {
			buf.WriteByte('{')
		}
		for i := 0; i < len(v); i += 2 {
			if i > 0 {
				buf.WriteByte(',')
			}
			k, _ := v[i].(string) // sanitize() above means no need to check success
			// arbitrary keys might need escaping
			buf.WriteString(prettyString(k))
			buf.WriteByte(':')
			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
		}
		if flags&flagRawStruct == 0 {
			buf.WriteByte('}')
		}
		return buf.String()
	}

	buf := bytes.NewBuffer(make([]byte, 0, 256))
	t := reflect.TypeOf(value)
	if t == nil {
		return "null"
	}
	v := reflect.ValueOf(value)
	switch t.Kind() {
	case reflect.Bool:
		return strconv.FormatBool(v.Bool())
	case reflect.String:
		return prettyString(v.String())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(int64(v.Int()), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return strconv.FormatUint(uint64(v.Uint()), 10)
	case reflect.Float32:
		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
	case reflect.Float64:
		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
	case reflect.Complex64:
		return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
	case reflect.Complex128:
		return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
	case reflect.Struct:
		if flags&flagRawStruct == 0 {
			buf.WriteByte('{')
		}
		for i := 0; i < t.NumField(); i++ {
			fld := t.Field(i)
			if fld.PkgPath != "" {
				// reflect says this field is only defined for non-exported fields.
				continue
			}
			if !v.Field(i).CanInterface() {
				// reflect isn't clear exactly what this means, but we can't use it.
				continue
			}
			name := ""
			omitempty := false
			if tag, found := fld.Tag.Lookup("json"); found {
				if tag == "-" {
					continue
				}
				if comma := strings.Index(tag, ","); comma != -1 {
					if n := tag[:comma]; n != "" {
						name = n
					}
					rest := tag[comma:]
					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
						omitempty = true
					}
				} else {
					name = tag
				}
			}
			if omitempty && isEmpty(v.Field(i)) {
				continue
			}
			if i > 0 {
				buf.WriteByte(',')
			}
			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
				continue
			}
			if name == "" {
				name = fld.Name
			}
			// field names can't contain characters which need escaping
			buf.WriteByte('"')
			buf.WriteString(name)
			buf.WriteByte('"')
			buf.WriteByte(':')
			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
		}
		if flags&flagRawStruct == 0 {
			buf.WriteByte('}')
		}
		return buf.String()
	case reflect.Slice, reflect.Array:
		buf.WriteByte('[')
		for i := 0; i < v.Len(); i++ {
			if i > 0 {
				buf.WriteByte(',')
			}
			e := v.Index(i)
			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
		}
		buf.WriteByte(']')
		return buf.String()
	case reflect.Map:
		buf.WriteByte('{')
		// This does not sort the map keys, for best perf.
		it := v.MapRange()
		i := 0
		for it.Next() {
			if i > 0 {
				buf.WriteByte(',')
			}
			// If a map key supports TextMarshaler, use it.
			keystr := ""
			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
				txt, err := m.MarshalText()
				if err != nil {
					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
				} else {
					keystr = string(txt)
				}
				keystr = prettyString(keystr)
			} else {
				// prettyWithFlags will produce already-escaped values
				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
				if t.Key().Kind() != reflect.String {
					// JSON only does string keys. Unlike Go's standard JSON, we'll
					// convert just about anything to a string.
					keystr = prettyString(keystr)
				}
			}
			buf.WriteString(keystr)
			buf.WriteByte(':')
			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
			i++
		}
		buf.WriteByte('}')
		return buf.String()
	case reflect.Ptr, reflect.Interface:
		if v.IsNil() {
			return "null"
		}
		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
	}
	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
}

func prettyString(s string) string {
	// Avoid escaping (which does allocations) if we can.
	if needsEscape(s) {
		return strconv.Quote(s)
	}
	b := bytes.NewBuffer(make([]byte, 0, 1024))
	b.WriteByte('"')
	b.WriteString(s)
	b.WriteByte('"')
	return b.String()
}

// needsEscape determines whether the input string needs to be escaped or not,
// without doing any allocations.
func needsEscape(s string) bool {
	for _, r := range s {
		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
			return true
		}
	}
	return false
}

func isEmpty(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Complex64, reflect.Complex128:
		return v.Complex() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	}
	return false
}

func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return m.MarshalLog()
}

func invokeStringer(s fmt.Stringer) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return s.String()
}

func invokeError(e error) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return e.Error()
}

// Caller represents the original call site for a log line, after considering
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
// Line fields will always be provided, while the Func field is optional.
// Users can set the render hook fields in Options to examine logged key-value
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
// field is enabled for the given MessageClass.
type Caller struct {
	// File is the basename of the file for this call site.
	File string `json:"file"`
	// Line is the line number in the file for this call site.
	Line int `json:"line"`
	// Func is the function name for this call site, or empty if
	// Options.LogCallerFunc is not enabled.
	Func string `json:"function,omitempty"`
}

func (f Formatter) caller() Caller {
	// +1 for this frame, +1 for Info/Error.
	pc, file, line, ok := runtime.Caller(f.depth + 2)
	if !ok {
		return Caller{"<unknown>", 0, ""}
	}
	fn := ""
	if f.opts.LogCallerFunc {
		if fp := runtime.FuncForPC(pc); fp != nil {
			fn = fp.Name()
		}
	}

	return Caller{filepath.Base(file), line, fn}
}

const noValue = "<no-value>"

func (f Formatter) nonStringKey(v interface{}) string {
	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}

// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v interface{}) string {
	const snipLen = 16

	snip := f.pretty(v)
	if len(snip) > snipLen {
		snip = snip[:snipLen]
	}
	return snip
}

// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
	if len(kvList)%2 != 0 {
		kvList = append(kvList, noValue)
	}
	for i := 0; i < len(kvList); i += 2 {
		_, ok := kvList[i].(string)
		if !ok {
			kvList[i] = f.nonStringKey(kvList[i])
		}
	}
	return kvList
}

// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
func (f *Formatter) Init(info logr.RuntimeInfo) {
	f.depth += info.CallDepth
}

// Enabled checks whether an info message at the given level should be logged.
func (f Formatter) Enabled(level int) bool {
	return level <= f.opts.Verbosity
}

// GetDepth returns the current depth of this Formatter. This is useful for
// implementations which do their own caller attribution.
func (f Formatter) GetDepth() int {
	return f.depth
}

// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
	args := make([]interface{}, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		args = append(args, "logger", prefix)
		prefix = ""
	}
	if f.opts.LogTimestamp {
		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
	}
	if policy := f.opts.LogCaller; policy == All || policy == Info {
		args = append(args, "caller", f.caller())
	}
	args = append(args, "level", level, "msg", msg)
	return prefix, f.render(args, kvList)
}

// FormatError renders an Error log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
	args := make([]interface{}, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		args = append(args, "logger", prefix)
		prefix = ""
	}
	if f.opts.LogTimestamp {
		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
	}
	if policy := f.opts.LogCaller; policy == All || policy == Error {
		args = append(args, "caller", f.caller())
	}
	args = append(args, "msg", msg)
	var loggableErr interface{}
	if err != nil {
		loggableErr = err.Error()
	}
	args = append(args, "error", loggableErr)
	return f.prefix, f.render(args, kvList)
}

// AddName appends the specified name. funcr uses '/' characters to separate
// name elements. Callers should not pass '/' in the provided name string, but
// this library does not actually enforce that.
func (f *Formatter) AddName(name string) {
	if len(f.prefix) > 0 {
		f.prefix += "/"
	}
	f.prefix += name
}

// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []interface{}) {
	// Three slice args forces a copy.
	n := len(f.values)
	f.values = append(f.values[:n:n], kvList...)

	vals := f.values
	if hook := f.opts.RenderValuesHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}

	// Pre-render values, so we don't have to do it on each Info/Error call.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	f.flatten(buf, vals, false, true) // escape user-provided keys
	f.valuesStr = buf.String()
}

// AddCallDepth increases the number of stack-frames to skip when attributing
// the log line to a file and line.
func (f *Formatter) AddCallDepth(depth int) {
	f.depth += depth
}
510	vendor/github.com/go-logr/logr/logr.go generated vendored Normal file

@@ -0,0 +1,510 @@
/*
Copyright 2019 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This design derives from Dave Cheney's blog:
//     http://dave.cheney.net/2015/11/05/lets-talk-about-logging

// Package logr defines a general-purpose logging API and abstract interfaces
// to back that API. Packages in the Go ecosystem can depend on this package,
// while callers can implement logging with whatever backend is appropriate.
//
// Usage
//
// Logging is done using a Logger instance. Logger is a concrete type with
// methods, which defers the actual logging to a LogSink interface. The main
// methods of Logger are Info() and Error(). Arguments to Info() and Error()
// are key/value pairs rather than printf-style formatted strings, emphasizing
// "structured logging".
//
// With Go's standard log package, we might write:
//   log.Printf("setting target value %s", targetValue)
//
// With logr's structured logging, we'd write:
//   logger.Info("setting target", "value", targetValue)
//
// Errors are much the same. Instead of:
//   log.Printf("failed to open the pod bay door for user %s: %v", user, err)
//
// We'd write:
//   logger.Error(err, "failed to open the pod bay door", "user", user)
//
// Info() and Error() are very similar, but they are separate methods so that
// LogSink implementations can choose to do things like attach additional
// information (such as stack traces) on calls to Error(). Error() messages are
// always logged, regardless of the current verbosity. If there is no error
// instance available, passing nil is valid.
//
// Verbosity
//
// Often we want to log information only when the application is in "verbose
// mode". To write log lines that are more verbose, Logger has a V() method.
// The higher the V-level of a log line, the less critical it is considered.
// Log-lines with V-levels that are not enabled (as per the LogSink) will not
// be written. Level V(0) is the default, and logger.V(0).Info() has the same
// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
// Error messages do not have a verbosity level and are always logged.
//
// Where we might have written:
//   if flVerbose >= 2 {
//       log.Printf("an unusual thing happened")
//   }
//
// We can write:
//   logger.V(2).Info("an unusual thing happened")
//
// Logger Names
//
// Logger instances can have name strings so that all messages logged through
// that instance have additional context. For example, you might want to add
// a subsystem name:
//
//   logger.WithName("compactor").Info("started", "time", time.Now())
//
// The WithName() method returns a new Logger, which can be passed to
// constructors or other functions for further use. Repeated use of WithName()
// will accumulate name "segments". These name segments will be joined in some
// way by the LogSink implementation. It is strongly recommended that name
// segments contain simple identifiers (letters, digits, and hyphen), and do
// not contain characters that could muddle the log output or confuse the
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
// quotes, etc).
//
// Saved Values
//
// Logger instances can store any number of key/value pairs, which will be
// logged alongside all messages logged through that instance. For example,
// you might want to create a Logger instance per managed object:
//
// With the standard log package, we might write:
//   log.Printf("decided to set field foo to value %q for object %s/%s",
//       targetValue, object.Namespace, object.Name)
//
// With logr we'd write:
//   // Elsewhere: set up the logger to log the object name.
//   obj.logger = mainLogger.WithValues(
//       "name", obj.name, "namespace", obj.namespace)
//
//   // later on...
//   obj.logger.Info("setting foo", "value", targetValue)
//
// Best Practices
//
// Logger has very few hard rules, with the goal that LogSink implementations
// might have a lot of freedom to differentiate. There are, however, some
// things to consider.
//
// The log message consists of a constant message attached to the log line.
// This should generally be a simple description of what's occurring, and should
// never be a format string. Variable information can then be attached using
// named values.
//
// Keys are arbitrary strings, but should generally be constant values. Values
// may be any Go value, but how the value is formatted is determined by the
// LogSink implementation.
//
// Logger instances are meant to be passed around by value. Code that receives
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
// Calling methods with the null logger (Logger{}) as instance will crash
// because it has no LogSink. Therefore this null logger should never be passed
// around. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
// Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
//   * be human-readable and meaningful (not auto-generated or simple ordinals)
//   * be constant (not dependent on input data)
//   * contain only printable characters
//   * not contain whitespace or punctuation
//   * use lower case for simple keys and lowerCamelCase for more complex ones
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
// output JSON data or will store data for later database (e.g. SQL) queries.
//
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
//   * "caller": the calling information (file/line) of a particular log line
//   * "error": the underlying error value in the `Error` method
//   * "level": the log level
//   * "logger": the name of the associated logger
//   * "msg": the log message
//   * "stacktrace": the stack trace associated with a particular log line or
//       error (often from the `Error` message)
//   * "ts": the timestamp for a log line
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
// Break Glass
//
// Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is:
//   // Underlier exposes access to the underlying logging implementation.
//   // Since callers only have a logr.Logger, they have to know which
//   // implementation is in use, so this interface is less of an abstraction
//   // and more of a way to test type conversion.
//   type Underlier interface {
//       GetUnderlying() <underlying-type>
//   }
//
// Logger grants access to the sink to enable type assertions like this:
//   func DoSomethingWithImpl(log logr.Logger) {
//       if underlier, ok := log.GetSink().(impl.Underlier); ok {
//           implLogger := underlier.GetUnderlying()
//           ...
//       }
//   }
//
// Custom `With*` functions can be implemented by copying the complete
// Logger struct and replacing the sink in the copy:
//   // WithFooBar changes the foobar parameter in the log sink and returns a
//   // new logger with that modified sink. It does nothing for loggers where
//   // the sink doesn't support that parameter.
//   func WithFoobar(log logr.Logger, foobar int) logr.Logger {
//      if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
//         log = log.WithSink(foobarLogSink.WithFooBar(foobar))
//      }
//      return log
//   }
//
// Don't use New to construct a new Logger with a LogSink retrieved from an
// existing Logger. Source code attribution might not work correctly and
// unexported fields in Logger get lost.
//
// Beware that the same LogSink instance may be shared by different logger
// instances. Calling functions that modify the LogSink will affect all of
// those.
package logr

import (
	"context"
)

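// Example (editorial sketch, not part of the upstream logr API): the basic
// call patterns described in the package documentation above, written against
// an already-constructed Logger. The function and argument names are
// assumptions for illustration only.
func exampleUsage(logger Logger, err error, targetValue string) {
	// Structured info log instead of a printf-style message.
	logger.Info("setting target", "value", targetValue)

	// More verbose logs are gated by V-levels; this line is only written
	// when the sink enables V(2).
	logger.V(2).Info("an unusual thing happened")

	// Errors are always logged, regardless of verbosity.
	logger.Error(err, "failed to open the pod bay door", "user", "dave")
}
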
// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users.
func New(sink LogSink) Logger {
	logger := Logger{}
	logger.setSink(sink)
	sink.Init(runtimeInfo)
	return logger
}

// setSink stores the sink and updates any related fields. It mutates the
// logger and thus is only safe to use for loggers that are not currently being
// used concurrently.
func (l *Logger) setSink(sink LogSink) {
	l.sink = sink
}

// GetSink returns the stored sink.
func (l Logger) GetSink() LogSink {
	return l.sink
}

// WithSink returns a copy of the logger with the new sink.
func (l Logger) WithSink(sink LogSink) Logger {
	l.setSink(sink)
	return l
}

// Logger is an interface to an abstract logging implementation. This is a
// concrete type for performance reasons, but all the real work is passed on to
// a LogSink. Implementations of LogSink should provide their own constructors
// that return Logger, not LogSink.
//
// The underlying sink can be accessed through GetSink and be modified through
// WithSink. This enables the implementation of custom extensions (see "Break
// Glass" in the package documentation). Normally the sink should be used only
// indirectly.
type Logger struct {
	sink  LogSink
	level int
}

// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
	return l.sink.Enabled(l.level)
}

// Info logs a non-error message with the given key/value pairs as context.
//
// The msg argument should be used to add some constant description to the log
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
	if l.Enabled() {
		if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
			withHelper.GetCallStackHelper()()
		}
		l.sink.Info(l.level, msg, keysAndValues...)
	}
}

// Error logs an error, with the given message and key/value pairs as context.
// It functions similarly to Info, but may have unique behavior, and should be
// preferred for logging errors (see the package documentation for more
// information). The log message will always be emitted, regardless of
// verbosity level.
//
// The msg argument should be used to add context to any underlying error,
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
	if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
		withHelper.GetCallStackHelper()()
	}
	l.sink.Error(err, msg, keysAndValues...)
}

// V returns a new Logger instance for a specific verbosity level, relative to
// this Logger. In other words, V-levels are additive. A higher verbosity
// level means a log message is less important. Negative V-levels are treated
// as 0.
func (l Logger) V(level int) Logger {
	if level < 0 {
		level = 0
	}
	l.level += level
	return l
}

// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
	l.setSink(l.sink.WithValues(keysAndValues...))
	return l
}

// WithName returns a new Logger instance with the specified name element added
// to the Logger's name. Successive calls with WithName append additional
// suffixes to the Logger's name. It's strongly recommended that name segments
// contain only letters, digits, and hyphens (see the package documentation for
// more information).
func (l Logger) WithName(name string) Logger {
	l.setSink(l.sink.WithName(name))
	return l
}

// WithCallDepth returns a Logger instance that offsets the call stack by the
// specified number of frames when logging call site information, if possible.
// This is useful for users who have helper functions between the "real" call
// site and the actual calls to Logger methods. If depth is 0 the attribution
// should be to the direct caller of this function. If depth is 1 the
// attribution should skip 1 call frame, and so on. Successive calls to this
// are additive.
//
// If the underlying log implementation supports a WithCallDepth(int) method,
// it will be called and the result returned. If the implementation does not
// support CallDepthLogSink, the original Logger will be returned.
//
// To skip one level, WithCallStackHelper() should be used instead of
// WithCallDepth(1) because it works with implementations that support the
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
func (l Logger) WithCallDepth(depth int) Logger {
	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
		l.setSink(withCallDepth.WithCallDepth(depth))
	}
	return l
}

// WithCallStackHelper returns a new Logger instance that skips the direct
// caller when logging call site information, if possible. This is useful for
// users who have helper functions between the "real" call site and the actual
// calls to Logger methods and want to support loggers which depend on marking
// each individual helper function, like loggers based on testing.T.
//
// In addition to using that new logger instance, callers also must call the
// returned function.
//
// If the underlying log implementation supports a WithCallDepth(int) method,
// WithCallDepth(1) will be called to produce a new logger. If it supports a
// WithCallStackHelper() method, that will be also called. If the
// implementation does not support either of these, the original Logger will be
// returned.
func (l Logger) WithCallStackHelper() (func(), Logger) {
	var helper func()
	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
		l.setSink(withCallDepth.WithCallDepth(1))
	}
	if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
		helper = withHelper.GetCallStackHelper()
	} else {
		helper = func() {}
	}
	return helper, l
}

// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}

// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
		return v, nil
	}

	return Logger{}, notFoundError{}
}

// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}

func (notFoundError) Error() string {
	return "no logr.Logger was present"
}

func (notFoundError) IsNotFound() bool {
	return true
}

// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
		return v
	}

	return Discard()
}

// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
	return context.WithValue(ctx, contextKey{}, logger)
}

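// Example (editorial sketch, not part of the upstream logr API): passing a
// Logger through a context.Context using the helpers above. The function name
// is an assumption for illustration only.
func exampleContextPlumbing(ctx context.Context, logger Logger) {
	// Attach the logger to a derived context...
	ctx = NewContext(ctx, logger)

	// ...and later retrieve it; FromContextOrDiscard never returns an
	// unusable Logger, so the call below is always safe.
	log := FromContextOrDiscard(ctx)
	log.Info("handling request")
}
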
// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {
	// CallDepth is the number of call frames the logr library adds between the
	// end-user and the LogSink. LogSink implementations which choose to print
	// the original logging site (e.g. file & line) should climb this many
	// additional frames to find it.
	CallDepth int
}

// runtimeInfo is a static global. It must not be changed at run time.
var runtimeInfo = RuntimeInfo{
	CallDepth: 1,
}

// LogSink represents a logging implementation. End-users will generally not
// interact with this type.
type LogSink interface {
	// Init receives optional information about the logr library for LogSink
	// implementations that need it.
	Init(info RuntimeInfo)

	// Enabled tests whether this LogSink is enabled at the specified V-level.
	// For example, commandline flags might be used to set the logging
	// verbosity and disable some info logs.
	Enabled(level int) bool

	// Info logs a non-error message with the given key/value pairs as context.
	// The level argument is provided for optional logging. This method will
	// only be called when Enabled(level) is true. See Logger.Info for more
	// details.
	Info(level int, msg string, keysAndValues ...interface{})

	// Error logs an error, with the given message and key/value pairs as
	// context. See Logger.Error for more details.
	Error(err error, msg string, keysAndValues ...interface{})

	// WithValues returns a new LogSink with additional key/value pairs. See
	// Logger.WithValues for more details.
	WithValues(keysAndValues ...interface{}) LogSink

	// WithName returns a new LogSink with the specified name appended. See
	// Logger.WithName for more details.
	WithName(name string) LogSink
}

// CallDepthLogSink represents a Logger that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as file,
// function, or line) would otherwise log information about the intermediate
// helper functions.
//
// This is an optional interface and implementations are not required to
// support it.
type CallDepthLogSink interface {
	// WithCallDepth returns a LogSink that will offset the call
	// stack by the specified number of frames when logging call
	// site information.
	//
	// If depth is 0, the LogSink should skip exactly the number
	// of call frames defined in RuntimeInfo.CallDepth when Info
	// or Error are called, i.e. the attribution should be to the
	// direct caller of Logger.Info or Logger.Error.
	//
	// If depth is 1 the attribution should skip 1 call frame, and so on.
	// Successive calls to this are additive.
	WithCallDepth(depth int) LogSink
}

// CallStackHelperLogSink represents a Logger that knows how to climb
// the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach.
//
// This is useful for users who have helper functions between the
// "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as
// file, function, or line) would otherwise log information about the
// intermediate helper functions.
//
// This is an optional interface and implementations are not required
// to support it. Implementations that choose to support this must not
// simply implement it as WithCallDepth(1), because
// Logger.WithCallStackHelper will call both methods if they are
// present. This should only be implemented for LogSinks that actually
// need it, as with testing.T.
type CallStackHelperLogSink interface {
	// GetCallStackHelper returns a function that must be called
	// to mark the direct caller as helper function when logging
	// call site information.
	GetCallStackHelper() func()
}

// Marshaler is an optional interface that logged values may choose to
// implement. Loggers with structured output, such as JSON, should
// log the object returned by the MarshalLog method instead of the
// original value.
type Marshaler interface {
	// MarshalLog can be used to:
	//   - ensure that structs are not logged as strings when the original
	//     value has a String method: return a different type without a
	//     String method
	//   - select which fields of a complex type should get logged:
	//     return a simpler struct with fewer fields
	//   - log unexported fields: return a different struct
	//     with exported fields
	//
	// It may return any value of any type.
	MarshalLog() interface{}
}

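// Example (editorial sketch, not part of the upstream logr API): a type that
// controls how it is logged by implementing Marshaler. The type and its fields
// are assumptions for illustration only.
type exampleRedactedUser struct {
	Name     string
	Password string
}

// MarshalLog hides the password and logs only the safe field.
func (u exampleRedactedUser) MarshalLog() interface{} {
	return struct{ Name string }{Name: u.Name}
}
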
201	vendor/github.com/go-logr/stdr/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
6 vendor/github.com/go-logr/stdr/README.md generated vendored Normal file
@@ -0,0 +1,6 @@
# Minimal Go logging using logr and Go's standard library

[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)

This package implements the [logr interface](https://github.com/go-logr/logr)
in terms of Go's standard log package (https://pkg.go.dev/log).
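
As a reading aid, and not part of the upstream diff itself: a minimal usage sketch of the stdr package, assuming the New, SetVerbosity, and logr Logger APIs shown in the vendored stdr.go below.

package main

import (
    "log"
    "os"

    "github.com/go-logr/stdr"
)

func main() {
    // Wrap Go's standard logger in a logr.Logger via stdr.
    logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))

    // Allow V(1) messages through the package-global verbosity gate.
    stdr.SetVerbosity(1)

    logger.Info("starting up", "component", "demo")               // V(0), always enabled
    logger.V(1).Info("verbose detail", "attempt", 3)              // enabled because verbosity >= 1
    logger.Error(nil, "something went wrong", "retryable", true)  // error-level output
}
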
170 vendor/github.com/go-logr/stdr/stdr.go generated vendored Normal file
@@ -0,0 +1,170 @@
/*
Copyright 2019 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package stdr implements github.com/go-logr/logr.Logger in terms of
// Go's standard log package.
package stdr

import (
    "log"
    "os"

    "github.com/go-logr/logr"
    "github.com/go-logr/logr/funcr"
)

// The global verbosity level. See SetVerbosity().
var globalVerbosity int

// SetVerbosity sets the global level against which all info logs will be
// compared. If this is greater than or equal to the "V" of the logger, the
// message will be logged. A higher value here means more logs will be written.
// The previous verbosity value is returned. This is not concurrent-safe -
// callers must be sure to call it from only one goroutine.
func SetVerbosity(v int) int {
    old := globalVerbosity
    globalVerbosity = v
    return old
}

// New returns a logr.Logger which is implemented by Go's standard log package,
// or something like it. If std is nil, this will use a default logger
// instead.
//
// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
func New(std StdLogger) logr.Logger {
    return NewWithOptions(std, Options{})
}

// NewWithOptions returns a logr.Logger which is implemented by Go's standard
// log package, or something like it. See New for details.
func NewWithOptions(std StdLogger, opts Options) logr.Logger {
    if std == nil {
        // Go's log.Default() is only available in 1.16 and higher.
        std = log.New(os.Stderr, "", log.LstdFlags)
    }

    if opts.Depth < 0 {
        opts.Depth = 0
    }

    fopts := funcr.Options{
        LogCaller: funcr.MessageClass(opts.LogCaller),
    }

    sl := &logger{
        Formatter: funcr.NewFormatter(fopts),
        std:       std,
    }

    // For skipping our own logger.Info/Error.
    sl.Formatter.AddCallDepth(1 + opts.Depth)

    return logr.New(sl)
}

// Options carries parameters which influence the way logs are generated.
type Options struct {
    // Depth biases the assumed number of call frames to the "true" caller.
    // This is useful when the calling code calls a function which then calls
    // stdr (e.g. a logging shim to another API). Values less than zero will
    // be treated as zero.
    Depth int

    // LogCaller tells stdr to add a "caller" key to some or all log lines.
    // Go's log package has options to log this natively, too.
    LogCaller MessageClass

    // TODO: add an option to log the date/time
}

// MessageClass indicates which category or categories of messages to consider.
type MessageClass int

const (
    // None ignores all message classes.
    None MessageClass = iota
    // All considers all message classes.
    All
    // Info only considers info messages.
    Info
    // Error only considers error messages.
    Error
)

// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
// this adapter.
type StdLogger interface {
    // Output is the same as log.Output and log.Logger.Output.
    Output(calldepth int, logline string) error
}

type logger struct {
    funcr.Formatter
    std StdLogger
}

var _ logr.LogSink = &logger{}
var _ logr.CallDepthLogSink = &logger{}

func (l logger) Enabled(level int) bool {
    return globalVerbosity >= level
}

func (l logger) Info(level int, msg string, kvList ...interface{}) {
    prefix, args := l.FormatInfo(level, msg, kvList)
    if prefix != "" {
        args = prefix + ": " + args
    }
    _ = l.std.Output(l.Formatter.GetDepth()+1, args)
}

func (l logger) Error(err error, msg string, kvList ...interface{}) {
    prefix, args := l.FormatError(err, msg, kvList)
    if prefix != "" {
        args = prefix + ": " + args
    }
    _ = l.std.Output(l.Formatter.GetDepth()+1, args)
}

func (l logger) WithName(name string) logr.LogSink {
    l.Formatter.AddName(name)
    return &l
}

func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
    l.Formatter.AddValues(kvList)
    return &l
}

func (l logger) WithCallDepth(depth int) logr.LogSink {
    l.Formatter.AddCallDepth(depth)
    return &l
}

// Underlier exposes access to the underlying logging implementation. Since
// callers only have a logr.Logger, they have to know which implementation is
// in use, so this interface is less of an abstraction and more of way to test
// type conversion.
type Underlier interface {
    GetUnderlying() StdLogger
}

// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
// is itself an interface, the result may or may not be a Go log.Logger.
func (l logger) GetUnderlying() StdLogger {
    return l.std
}

524 vendor/github.com/golang/protobuf/jsonpb/decode.go generated vendored Normal file
@@ -0,0 +1,524 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package jsonpb

import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "math"
    "reflect"
    "strconv"
    "strings"
    "time"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/encoding/protojson"
    protoV2 "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"
)

const wrapJSONUnmarshalV2 = false

// UnmarshalNext unmarshals the next JSON object from d into m.
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
    return new(Unmarshaler).UnmarshalNext(d, m)
}

// Unmarshal unmarshals a JSON object from r into m.
func Unmarshal(r io.Reader, m proto.Message) error {
    return new(Unmarshaler).Unmarshal(r, m)
}

// UnmarshalString unmarshals a JSON object from s into m.
func UnmarshalString(s string, m proto.Message) error {
    return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
}

// Unmarshaler is a configurable object for converting from a JSON
// representation to a protocol buffer object.
type Unmarshaler struct {
    // AllowUnknownFields specifies whether to allow messages to contain
    // unknown JSON fields, as opposed to failing to unmarshal.
    AllowUnknownFields bool

    // AnyResolver is used to resolve the google.protobuf.Any well-known type.
    // If unset, the global registry is used by default.
    AnyResolver AnyResolver
}

// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
// they are unmarshaled from JSON. Messages that implement this should also
// implement JSONPBMarshaler so that the custom format can be produced.
//
// The JSON unmarshaling must follow the JSON to proto specification:
//  https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBUnmarshaler interface {
    UnmarshalJSONPB(*Unmarshaler, []byte) error
}

// Unmarshal unmarshals a JSON object from r into m.
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
    return u.UnmarshalNext(json.NewDecoder(r), m)
}

// UnmarshalNext unmarshals the next JSON object from d into m.
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
    if m == nil {
        return errors.New("invalid nil message")
    }

    // Parse the next JSON object from the stream.
    raw := json.RawMessage{}
    if err := d.Decode(&raw); err != nil {
        return err
    }

    // Check for custom unmarshalers first since they may not properly
    // implement protobuf reflection that the logic below relies on.
    if jsu, ok := m.(JSONPBUnmarshaler); ok {
        return jsu.UnmarshalJSONPB(u, raw)
    }

    mr := proto.MessageReflect(m)

    // NOTE: For historical reasons, a top-level null is treated as a noop.
    // This is incorrect, but kept for compatibility.
    if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
        return nil
    }

    if wrapJSONUnmarshalV2 {
        // NOTE: If input message is non-empty, we need to preserve merge semantics
        // of the old jsonpb implementation. These semantics are not supported by
        // the protobuf JSON specification.
        isEmpty := true
        mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
            isEmpty = false // at least one iteration implies non-empty
            return false
        })
        if !isEmpty {
            // Perform unmarshaling into a newly allocated, empty message.
            mr = mr.New()

            // Use a defer to copy all unmarshaled fields into the original message.
            dst := proto.MessageReflect(m)
            defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
                dst.Set(fd, v)
                return true
            })
        }

        // Unmarshal using the v2 JSON unmarshaler.
        opts := protojson.UnmarshalOptions{
            DiscardUnknown: u.AllowUnknownFields,
        }
        if u.AnyResolver != nil {
            opts.Resolver = anyResolver{u.AnyResolver}
        }
        return opts.Unmarshal(raw, mr.Interface())
    } else {
        if err := u.unmarshalMessage(mr, raw); err != nil {
            return err
        }
        return protoV2.CheckInitialized(mr.Interface())
    }
}

func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
    md := m.Descriptor()
    fds := md.Fields()

    if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
        return jsu.UnmarshalJSONPB(u, in)
    }

    if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
        return nil
    }

    switch wellKnownType(md.FullName()) {
    case "Any":
        var jsonObject map[string]json.RawMessage
        if err := json.Unmarshal(in, &jsonObject); err != nil {
            return err
        }

        rawTypeURL, ok := jsonObject["@type"]
        if !ok {
            return errors.New("Any JSON doesn't have '@type'")
        }
        typeURL, err := unquoteString(string(rawTypeURL))
        if err != nil {
            return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
        }
        m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))

        var m2 protoreflect.Message
        if u.AnyResolver != nil {
            mi, err := u.AnyResolver.Resolve(typeURL)
            if err != nil {
                return err
            }
            m2 = proto.MessageReflect(mi)
        } else {
            mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
            if err != nil {
                if err == protoregistry.NotFound {
                    return fmt.Errorf("could not resolve Any message type: %v", typeURL)
                }
                return err
            }
            m2 = mt.New()
        }

        if wellKnownType(m2.Descriptor().FullName()) != "" {
            rawValue, ok := jsonObject["value"]
            if !ok {
                return errors.New("Any JSON doesn't have 'value'")
            }
            if err := u.unmarshalMessage(m2, rawValue); err != nil {
                return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
            }
        } else {
            delete(jsonObject, "@type")
            rawJSON, err := json.Marshal(jsonObject)
            if err != nil {
                return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
            }
            if err = u.unmarshalMessage(m2, rawJSON); err != nil {
                return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
            }
        }

        rawWire, err := protoV2.Marshal(m2.Interface())
        if err != nil {
            return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
        }
        m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
        return nil
    case "BoolValue", "BytesValue", "StringValue",
        "Int32Value", "UInt32Value", "FloatValue",
        "Int64Value", "UInt64Value", "DoubleValue":
        fd := fds.ByNumber(1)
        v, err := u.unmarshalValue(m.NewField(fd), in, fd)
        if err != nil {
            return err
        }
        m.Set(fd, v)
        return nil
    case "Duration":
        v, err := unquoteString(string(in))
        if err != nil {
            return err
        }
        d, err := time.ParseDuration(v)
        if err != nil {
            return fmt.Errorf("bad Duration: %v", err)
        }

        sec := d.Nanoseconds() / 1e9
        nsec := d.Nanoseconds() % 1e9
        m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
        m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
        return nil
    case "Timestamp":
        v, err := unquoteString(string(in))
        if err != nil {
            return err
        }
        t, err := time.Parse(time.RFC3339Nano, v)
        if err != nil {
            return fmt.Errorf("bad Timestamp: %v", err)
        }

        sec := t.Unix()
        nsec := t.Nanosecond()
        m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
        m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
        return nil
    case "Value":
        switch {
        case string(in) == "null":
            m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
        case string(in) == "true":
            m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
        case string(in) == "false":
            m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
        case hasPrefixAndSuffix('"', in, '"'):
            s, err := unquoteString(string(in))
            if err != nil {
                return fmt.Errorf("unrecognized type for Value %q", in)
            }
            m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
        case hasPrefixAndSuffix('[', in, ']'):
            v := m.Mutable(fds.ByNumber(6))
            return u.unmarshalMessage(v.Message(), in)
        case hasPrefixAndSuffix('{', in, '}'):
            v := m.Mutable(fds.ByNumber(5))
            return u.unmarshalMessage(v.Message(), in)
        default:
            f, err := strconv.ParseFloat(string(in), 0)
            if err != nil {
                return fmt.Errorf("unrecognized type for Value %q", in)
            }
            m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
        }
        return nil
    case "ListValue":
        var jsonArray []json.RawMessage
        if err := json.Unmarshal(in, &jsonArray); err != nil {
            return fmt.Errorf("bad ListValue: %v", err)
        }

        lv := m.Mutable(fds.ByNumber(1)).List()
        for _, raw := range jsonArray {
            ve := lv.NewElement()
            if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
                return err
            }
            lv.Append(ve)
        }
        return nil
    case "Struct":
        var jsonObject map[string]json.RawMessage
        if err := json.Unmarshal(in, &jsonObject); err != nil {
            return fmt.Errorf("bad StructValue: %v", err)
        }

        mv := m.Mutable(fds.ByNumber(1)).Map()
        for key, raw := range jsonObject {
            kv := protoreflect.ValueOf(key).MapKey()
            vv := mv.NewValue()
            if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
                return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
            }
            mv.Set(kv, vv)
        }
        return nil
    }

    var jsonObject map[string]json.RawMessage
    if err := json.Unmarshal(in, &jsonObject); err != nil {
        return err
    }

    // Handle known fields.
    for i := 0; i < fds.Len(); i++ {
        fd := fds.Get(i)
        if fd.IsWeak() && fd.Message().IsPlaceholder() {
            continue // weak reference is not linked in
        }

        // Search for any raw JSON value associated with this field.
        var raw json.RawMessage
        name := string(fd.Name())
        if fd.Kind() == protoreflect.GroupKind {
            name = string(fd.Message().Name())
        }
        if v, ok := jsonObject[name]; ok {
            delete(jsonObject, name)
            raw = v
        }
        name = string(fd.JSONName())
        if v, ok := jsonObject[name]; ok {
            delete(jsonObject, name)
            raw = v
        }

        field := m.NewField(fd)
        // Unmarshal the field value.
        if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
            continue
        }
        v, err := u.unmarshalValue(field, raw, fd)
        if err != nil {
            return err
        }
        m.Set(fd, v)
    }

    // Handle extension fields.
    for name, raw := range jsonObject {
        if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
            continue
        }

        // Resolve the extension field by name.
        xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
        xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
        if xt == nil && isMessageSet(md) {
            xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
        }
        if xt == nil {
            continue
        }
        delete(jsonObject, name)
        fd := xt.TypeDescriptor()
        if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
            return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
        }

        field := m.NewField(fd)
        // Unmarshal the field value.
        if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
            continue
        }
        v, err := u.unmarshalValue(field, raw, fd)
        if err != nil {
            return err
        }
        m.Set(fd, v)
    }

    if !u.AllowUnknownFields && len(jsonObject) > 0 {
        for name := range jsonObject {
            return fmt.Errorf("unknown field %q in %v", name, md.FullName())
        }
    }
    return nil
}

func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
    if md := fd.Message(); md != nil {
        return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
    }
    return false
}

func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
    if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
        _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
        return ok
    }
    return false
}

func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
    switch {
    case fd.IsList():
        var jsonArray []json.RawMessage
        if err := json.Unmarshal(in, &jsonArray); err != nil {
            return v, err
        }
        lv := v.List()
        for _, raw := range jsonArray {
            ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
            if err != nil {
                return v, err
            }
            lv.Append(ve)
        }
        return v, nil
    case fd.IsMap():
        var jsonObject map[string]json.RawMessage
        if err := json.Unmarshal(in, &jsonObject); err != nil {
            return v, err
        }
        kfd := fd.MapKey()
        vfd := fd.MapValue()
        mv := v.Map()
        for key, raw := range jsonObject {
            var kv protoreflect.MapKey
            if kfd.Kind() == protoreflect.StringKind {
                kv = protoreflect.ValueOf(key).MapKey()
            } else {
                v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
                if err != nil {
                    return v, err
                }
                kv = v.MapKey()
            }

            vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
            if err != nil {
                return v, err
            }
            mv.Set(kv, vv)
        }
        return v, nil
    default:
        return u.unmarshalSingularValue(v, in, fd)
    }
}

var nonFinite = map[string]float64{
    `"NaN"`:       math.NaN(),
    `"Infinity"`:  math.Inf(+1),
    `"-Infinity"`: math.Inf(-1),
}

func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
    switch fd.Kind() {
    case protoreflect.BoolKind:
        return unmarshalValue(in, new(bool))
    case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
        return unmarshalValue(trimQuote(in), new(int32))
    case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
        return unmarshalValue(trimQuote(in), new(int64))
    case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
        return unmarshalValue(trimQuote(in), new(uint32))
    case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
        return unmarshalValue(trimQuote(in), new(uint64))
    case protoreflect.FloatKind:
        if f, ok := nonFinite[string(in)]; ok {
            return protoreflect.ValueOfFloat32(float32(f)), nil
        }
        return unmarshalValue(trimQuote(in), new(float32))
    case protoreflect.DoubleKind:
        if f, ok := nonFinite[string(in)]; ok {
            return protoreflect.ValueOfFloat64(float64(f)), nil
        }
        return unmarshalValue(trimQuote(in), new(float64))
    case protoreflect.StringKind:
        return unmarshalValue(in, new(string))
    case protoreflect.BytesKind:
        return unmarshalValue(in, new([]byte))
    case protoreflect.EnumKind:
        if hasPrefixAndSuffix('"', in, '"') {
            vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
            if vd == nil {
                return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
            }
            return protoreflect.ValueOfEnum(vd.Number()), nil
        }
        return unmarshalValue(in, new(protoreflect.EnumNumber))
    case protoreflect.MessageKind, protoreflect.GroupKind:
        err := u.unmarshalMessage(v.Message(), in)
        return v, err
    default:
        panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
    }
}

func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
    err := json.Unmarshal(in, v)
    return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
}

func unquoteString(in string) (out string, err error) {
    err = json.Unmarshal([]byte(in), &out)
    return out, err
}

func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
    if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
        return true
    }
    return false
}

// trimQuote is like unquoteString but simply strips surrounding quotes.
// This is incorrect, but is behavior done by the legacy implementation.
func trimQuote(in []byte) []byte {
    if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
        in = in[1 : len(in)-1]
    }
    return in
}

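As a reading aid, and not part of the diff: a small, hedged sketch of how the Unmarshaler above is typically used. The wrapperspb well-known type is assumed here purely as a convenient concrete message to decode into.

import (
    "strings"

    "github.com/golang/protobuf/jsonpb"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func decodeExample() (*wrapperspb.StringValue, error) {
    // Unmarshal a JSON document into a protobuf message, tolerating
    // JSON fields the message does not declare.
    u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
    msg := &wrapperspb.StringValue{}
    if err := u.Unmarshal(strings.NewReader(`"hello"`), msg); err != nil {
        return nil, err
    }
    return msg, nil
}
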
559 vendor/github.com/golang/protobuf/jsonpb/encode.go generated vendored Normal file
@@ -0,0 +1,559 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package jsonpb

import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "math"
    "reflect"
    "sort"
    "strconv"
    "strings"
    "time"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/encoding/protojson"
    protoV2 "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"
)

const wrapJSONMarshalV2 = false

// Marshaler is a configurable object for marshaling protocol buffer messages
// to the specified JSON representation.
type Marshaler struct {
    // OrigName specifies whether to use the original protobuf name for fields.
    OrigName bool

    // EnumsAsInts specifies whether to render enum values as integers,
    // as opposed to string values.
    EnumsAsInts bool

    // EmitDefaults specifies whether to render fields with zero values.
    EmitDefaults bool

    // Indent controls whether the output is compact or not.
    // If empty, the output is compact JSON. Otherwise, every JSON object
    // entry and JSON array value will be on its own line.
    // Each line will be preceded by repeated copies of Indent, where the
    // number of copies is the current indentation depth.
    Indent string

    // AnyResolver is used to resolve the google.protobuf.Any well-known type.
    // If unset, the global registry is used by default.
    AnyResolver AnyResolver
}

// JSONPBMarshaler is implemented by protobuf messages that customize the
// way they are marshaled to JSON. Messages that implement this should also
// implement JSONPBUnmarshaler so that the custom format can be parsed.
//
// The JSON marshaling must follow the proto to JSON specification:
//  https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBMarshaler interface {
    MarshalJSONPB(*Marshaler) ([]byte, error)
}

// Marshal serializes a protobuf message as JSON into w.
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
    b, err := jm.marshal(m)
    if len(b) > 0 {
        if _, err := w.Write(b); err != nil {
            return err
        }
    }
    return err
}

// MarshalToString serializes a protobuf message as JSON in string form.
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
    b, err := jm.marshal(m)
    if err != nil {
        return "", err
    }
    return string(b), nil
}

func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
    v := reflect.ValueOf(m)
    if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
        return nil, errors.New("Marshal called with nil")
    }

    // Check for custom marshalers first since they may not properly
    // implement protobuf reflection that the logic below relies on.
    if jsm, ok := m.(JSONPBMarshaler); ok {
        return jsm.MarshalJSONPB(jm)
    }

    if wrapJSONMarshalV2 {
        opts := protojson.MarshalOptions{
            UseProtoNames:   jm.OrigName,
            UseEnumNumbers:  jm.EnumsAsInts,
            EmitUnpopulated: jm.EmitDefaults,
            Indent:          jm.Indent,
        }
        if jm.AnyResolver != nil {
            opts.Resolver = anyResolver{jm.AnyResolver}
        }
        return opts.Marshal(proto.MessageReflect(m).Interface())
    } else {
        // Check for unpopulated required fields first.
        m2 := proto.MessageReflect(m)
        if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
            return nil, err
        }

        w := jsonWriter{Marshaler: jm}
        err := w.marshalMessage(m2, "", "")
        return w.buf, err
    }
}

type jsonWriter struct {
    *Marshaler
    buf []byte
}

func (w *jsonWriter) write(s string) {
    w.buf = append(w.buf, s...)
}

func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
    if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
        b, err := jsm.MarshalJSONPB(w.Marshaler)
        if err != nil {
            return err
        }
        if typeURL != "" {
            // we are marshaling this object to an Any type
            var js map[string]*json.RawMessage
            if err = json.Unmarshal(b, &js); err != nil {
                return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
            }
            turl, err := json.Marshal(typeURL)
            if err != nil {
                return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
            }
            js["@type"] = (*json.RawMessage)(&turl)
            if b, err = json.Marshal(js); err != nil {
                return err
            }
        }
        w.write(string(b))
        return nil
    }

    md := m.Descriptor()
    fds := md.Fields()

    // Handle well-known types.
    const secondInNanos = int64(time.Second / time.Nanosecond)
    switch wellKnownType(md.FullName()) {
    case "Any":
        return w.marshalAny(m, indent)
    case "BoolValue", "BytesValue", "StringValue",
        "Int32Value", "UInt32Value", "FloatValue",
        "Int64Value", "UInt64Value", "DoubleValue":
        fd := fds.ByNumber(1)
        return w.marshalValue(fd, m.Get(fd), indent)
    case "Duration":
        const maxSecondsInDuration = 315576000000
        // "Generated output always contains 0, 3, 6, or 9 fractional digits,
        //  depending on required precision."
        s := m.Get(fds.ByNumber(1)).Int()
        ns := m.Get(fds.ByNumber(2)).Int()
        if s < -maxSecondsInDuration || s > maxSecondsInDuration {
            return fmt.Errorf("seconds out of range %v", s)
        }
        if ns <= -secondInNanos || ns >= secondInNanos {
            return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
        }
        if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
            return errors.New("signs of seconds and nanos do not match")
        }
        var sign string
        if s < 0 || ns < 0 {
            sign, s, ns = "-", -1*s, -1*ns
        }
        x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
        x = strings.TrimSuffix(x, "000")
        x = strings.TrimSuffix(x, "000")
        x = strings.TrimSuffix(x, ".000")
        w.write(fmt.Sprintf(`"%vs"`, x))
        return nil
    case "Timestamp":
        // "RFC 3339, where generated output will always be Z-normalized
        //  and uses 0, 3, 6 or 9 fractional digits."
        s := m.Get(fds.ByNumber(1)).Int()
        ns := m.Get(fds.ByNumber(2)).Int()
        if ns < 0 || ns >= secondInNanos {
            return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
        }
        t := time.Unix(s, ns).UTC()
        // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
        x := t.Format("2006-01-02T15:04:05.000000000")
        x = strings.TrimSuffix(x, "000")
        x = strings.TrimSuffix(x, "000")
        x = strings.TrimSuffix(x, ".000")
        w.write(fmt.Sprintf(`"%vZ"`, x))
        return nil
    case "Value":
        // JSON value; which is a null, number, string, bool, object, or array.
        od := md.Oneofs().Get(0)
        fd := m.WhichOneof(od)
        if fd == nil {
            return errors.New("nil Value")
        }
        return w.marshalValue(fd, m.Get(fd), indent)
    case "Struct", "ListValue":
        // JSON object or array.
        fd := fds.ByNumber(1)
        return w.marshalValue(fd, m.Get(fd), indent)
    }

    w.write("{")
    if w.Indent != "" {
        w.write("\n")
    }

    firstField := true
    if typeURL != "" {
        if err := w.marshalTypeURL(indent, typeURL); err != nil {
            return err
        }
        firstField = false
    }

    for i := 0; i < fds.Len(); {
        fd := fds.Get(i)
        if od := fd.ContainingOneof(); od != nil {
            fd = m.WhichOneof(od)
            i += od.Fields().Len()
            if fd == nil {
                continue
            }
        } else {
            i++
        }

        v := m.Get(fd)

        if !m.Has(fd) {
            if !w.EmitDefaults || fd.ContainingOneof() != nil {
                continue
            }
            if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
                v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
            }
        }

        if !firstField {
            w.writeComma()
        }
        if err := w.marshalField(fd, v, indent); err != nil {
            return err
        }
        firstField = false
    }

    // Handle proto2 extensions.
    if md.ExtensionRanges().Len() > 0 {
        // Collect a sorted list of all extension descriptor and values.
        type ext struct {
            desc protoreflect.FieldDescriptor
            val  protoreflect.Value
        }
        var exts []ext
        m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
            if fd.IsExtension() {
                exts = append(exts, ext{fd, v})
            }
            return true
        })
        sort.Slice(exts, func(i, j int) bool {
            return exts[i].desc.Number() < exts[j].desc.Number()
        })

        for _, ext := range exts {
            if !firstField {
                w.writeComma()
            }
            if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
                return err
            }
            firstField = false
        }
    }

    if w.Indent != "" {
        w.write("\n")
        w.write(indent)
    }
    w.write("}")
    return nil
}

func (w *jsonWriter) writeComma() {
    if w.Indent != "" {
        w.write(",\n")
    } else {
        w.write(",")
    }
}

func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
    // "If the Any contains a value that has a special JSON mapping,
    //  it will be converted as follows: {"@type": xxx, "value": yyy}.
    //  Otherwise, the value will be converted into a JSON object,
    //  and the "@type" field will be inserted to indicate the actual data type."
    md := m.Descriptor()
    typeURL := m.Get(md.Fields().ByNumber(1)).String()
    rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()

    var m2 protoreflect.Message
    if w.AnyResolver != nil {
        mi, err := w.AnyResolver.Resolve(typeURL)
        if err != nil {
            return err
        }
        m2 = proto.MessageReflect(mi)
    } else {
        mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
        if err != nil {
            return err
        }
        m2 = mt.New()
    }

    if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
        return err
    }

    if wellKnownType(m2.Descriptor().FullName()) == "" {
        return w.marshalMessage(m2, indent, typeURL)
    }

    w.write("{")
    if w.Indent != "" {
        w.write("\n")
    }
    if err := w.marshalTypeURL(indent, typeURL); err != nil {
        return err
    }
    w.writeComma()
    if w.Indent != "" {
        w.write(indent)
        w.write(w.Indent)
        w.write(`"value": `)
    } else {
        w.write(`"value":`)
    }
    if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
        return err
    }
    if w.Indent != "" {
        w.write("\n")
        w.write(indent)
    }
    w.write("}")
    return nil
}

func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
    if w.Indent != "" {
        w.write(indent)
        w.write(w.Indent)
    }
    w.write(`"@type":`)
    if w.Indent != "" {
        w.write(" ")
    }
    b, err := json.Marshal(typeURL)
    if err != nil {
        return err
    }
    w.write(string(b))
    return nil
}

// marshalField writes field description and value to the Writer.
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
    if w.Indent != "" {
        w.write(indent)
        w.write(w.Indent)
    }
    w.write(`"`)
    switch {
    case fd.IsExtension():
        // For message set, use the fname of the message as the extension name.
        name := string(fd.FullName())
        if isMessageSet(fd.ContainingMessage()) {
            name = strings.TrimSuffix(name, ".message_set_extension")
        }

        w.write("[" + name + "]")
    case w.OrigName:
        name := string(fd.Name())
        if fd.Kind() == protoreflect.GroupKind {
            name = string(fd.Message().Name())
        }
        w.write(name)
    default:
        w.write(string(fd.JSONName()))
    }
    w.write(`":`)
    if w.Indent != "" {
        w.write(" ")
    }
    return w.marshalValue(fd, v, indent)
}

func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
    switch {
    case fd.IsList():
        w.write("[")
        comma := ""
        lv := v.List()
        for i := 0; i < lv.Len(); i++ {
            w.write(comma)
            if w.Indent != "" {
                w.write("\n")
                w.write(indent)
                w.write(w.Indent)
                w.write(w.Indent)
            }
            if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
                return err
            }
            comma = ","
        }
        if w.Indent != "" {
            w.write("\n")
            w.write(indent)
            w.write(w.Indent)
        }
        w.write("]")
        return nil
    case fd.IsMap():
        kfd := fd.MapKey()
        vfd := fd.MapValue()
        mv := v.Map()

        // Collect a sorted list of all map keys and values.
        type entry struct{ key, val protoreflect.Value }
        var entries []entry
        mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
            entries = append(entries, entry{k.Value(), v})
            return true
        })
        sort.Slice(entries, func(i, j int) bool {
            switch kfd.Kind() {
            case protoreflect.BoolKind:
                return !entries[i].key.Bool() && entries[j].key.Bool()
            case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
                return entries[i].key.Int() < entries[j].key.Int()
            case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
                return entries[i].key.Uint() < entries[j].key.Uint()
            case protoreflect.StringKind:
                return entries[i].key.String() < entries[j].key.String()
            default:
                panic("invalid kind")
            }
        })

        w.write(`{`)
        comma := ""
        for _, entry := range entries {
            w.write(comma)
            if w.Indent != "" {
                w.write("\n")
                w.write(indent)
                w.write(w.Indent)
                w.write(w.Indent)
            }

            s := fmt.Sprint(entry.key.Interface())
            b, err := json.Marshal(s)
            if err != nil {
                return err
            }
            w.write(string(b))

            w.write(`:`)
            if w.Indent != "" {
                w.write(` `)
            }

            if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
                return err
            }
            comma = ","
        }
        if w.Indent != "" {
            w.write("\n")
            w.write(indent)
            w.write(w.Indent)
        }
        w.write(`}`)
        return nil
    default:
        return w.marshalSingularValue(fd, v, indent)
    }
}

func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
    switch {
    case !v.IsValid():
        w.write("null")
        return nil
    case fd.Message() != nil:
        return w.marshalMessage(v.Message(), indent+w.Indent, "")
    case fd.Enum() != nil:
        if fd.Enum().FullName() == "google.protobuf.NullValue" {
            w.write("null")
            return nil
        }

        vd := fd.Enum().Values().ByNumber(v.Enum())
        if vd == nil || w.EnumsAsInts {
            w.write(strconv.Itoa(int(v.Enum())))
        } else {
            w.write(`"` + string(vd.Name()) + `"`)
        }
        return nil
    default:
        switch v.Interface().(type) {
        case float32, float64:
            switch {
            case math.IsInf(v.Float(), +1):
                w.write(`"Infinity"`)
                return nil
            case math.IsInf(v.Float(), -1):
                w.write(`"-Infinity"`)
                return nil
            case math.IsNaN(v.Float()):
                w.write(`"NaN"`)
                return nil
            }
        case int64, uint64:
            w.write(fmt.Sprintf(`"%d"`, v.Interface()))
            return nil
        }

        b, err := json.Marshal(v.Interface())
        if err != nil {
            return err
        }
        w.write(string(b))
        return nil
    }
}

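Again purely as a reading aid and not part of the diff: a hedged sketch of the Marshaler above. The structpb well-known type is assumed only to provide a concrete message to serialize.

import (
    "os"

    "github.com/golang/protobuf/jsonpb"
    "google.golang.org/protobuf/types/known/structpb"
)

func encodeExample() error {
    payload, err := structpb.NewStruct(map[string]interface{}{
        "service": "gotosocial",
        "tracing": true,
    })
    if err != nil {
        return err
    }

    // Indent makes the writer emit one JSON entry per line;
    // EmitDefaults also renders zero-valued fields.
    m := &jsonpb.Marshaler{Indent: "  ", EmitDefaults: true}
    return m.Marshal(os.Stdout, payload)
}
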
69 vendor/github.com/golang/protobuf/jsonpb/json.go generated vendored Normal file
@@ -0,0 +1,69 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package jsonpb provides functionality to marshal and unmarshal between a
// protocol buffer message and JSON. It follows the specification at
// https://developers.google.com/protocol-buffers/docs/proto3#json.
//
// Do not rely on the default behavior of the standard encoding/json package
// when called on generated message types as it does not operate correctly.
//
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
// package instead.
package jsonpb

import (
    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"
    "google.golang.org/protobuf/runtime/protoimpl"
)

// AnyResolver takes a type URL, present in an Any message,
// and resolves it into an instance of the associated message.
type AnyResolver interface {
    Resolve(typeURL string) (proto.Message, error)
}

type anyResolver struct{ AnyResolver }

func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
    return r.FindMessageByURL(string(message))
}

func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
    m, err := r.Resolve(url)
    if err != nil {
        return nil, err
    }
    return protoimpl.X.MessageTypeOf(m), nil
}

func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
    return protoregistry.GlobalTypes.FindExtensionByName(field)
}

func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
    return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}

func wellKnownType(s protoreflect.FullName) string {
    if s.Parent() == "google.protobuf" {
        switch s.Name() {
        case "Empty", "Any",
            "BoolValue", "BytesValue", "StringValue",
            "Int32Value", "UInt32Value", "FloatValue",
            "Int64Value", "UInt64Value", "DoubleValue",
            "Duration", "Timestamp",
            "NullValue", "Struct", "Value", "ListValue":
            return string(s.Name())
        }
    }
    return ""
}

func isMessageSet(md protoreflect.MessageDescriptor) bool {
    ms, ok := md.(interface{ IsMessageSet() bool })
    return ok && ms.IsMessageSet()
}

179 vendor/github.com/golang/protobuf/ptypes/any.go generated vendored Normal file
@@ -0,0 +1,179 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ptypes

import (
    "fmt"
    "strings"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"

    anypb "github.com/golang/protobuf/ptypes/any"
)

const urlPrefix = "type.googleapis.com/"

// AnyMessageName returns the message name contained in an anypb.Any message.
// Most type assertions should use the Is function instead.
//
// Deprecated: Call the any.MessageName method instead.
func AnyMessageName(any *anypb.Any) (string, error) {
    name, err := anyMessageName(any)
    return string(name), err
}
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
    if any == nil {
        return "", fmt.Errorf("message is nil")
    }
    name := protoreflect.FullName(any.TypeUrl)
    if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
        name = name[i+len("/"):]
    }
    if !name.IsValid() {
        return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
    }
    return name, nil
}

// MarshalAny marshals the given message m into an anypb.Any message.
//
// Deprecated: Call the anypb.New function instead.
func MarshalAny(m proto.Message) (*anypb.Any, error) {
    switch dm := m.(type) {
    case DynamicAny:
        m = dm.Message
    case *DynamicAny:
        if dm == nil {
            return nil, proto.ErrNil
        }
        m = dm.Message
    }
    b, err := proto.Marshal(m)
    if err != nil {
        return nil, err
    }
    return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}

// Empty returns a new message of the type specified in an anypb.Any message.
// It returns protoregistry.NotFound if the corresponding message type could not
// be resolved in the global registry.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
// to resolve the message name and create a new instance of it.
func Empty(any *anypb.Any) (proto.Message, error) {
    name, err := anyMessageName(any)
    if err != nil {
        return nil, err
    }
    mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
    if err != nil {
        return nil, err
    }
    return proto.MessageV1(mt.New().Interface()), nil
}

// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
// into the provided message m. It returns an error if the target message
// does not match the type in the Any message or if an unmarshal error occurs.
//
// The target message m may be a *DynamicAny message. If the underlying message
// type could not be resolved, then this returns protoregistry.NotFound.
//
// Deprecated: Call the any.UnmarshalTo method instead.
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
    if dm, ok := m.(*DynamicAny); ok {
        if dm.Message == nil {
            var err error
            dm.Message, err = Empty(any)
            if err != nil {
                return err
            }
        }
        m = dm.Message
    }

    anyName, err := AnyMessageName(any)
    if err != nil {
        return err
    }
    msgName := proto.MessageName(m)
    if anyName != msgName {
        return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
    }
    return proto.Unmarshal(any.Value, m)
}

// Is reports whether the Any message contains a message of the specified type.
//
// Deprecated: Call the any.MessageIs method instead.
func Is(any *anypb.Any, m proto.Message) bool {
    if any == nil || m == nil {
        return false
    }
    name := proto.MessageName(m)
    if !strings.HasSuffix(any.TypeUrl, name) {
        return false
    }
    return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
}

// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in an anypb.Any message.
// The allocated message is stored in the embedded proto.Message.
//
// Example:
//   var x ptypes.DynamicAny
//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
//   fmt.Printf("unmarshaled message: %v", x.Message)
//
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
// the any message contents into a new instance of the underlying message.
type DynamicAny struct{ proto.Message }

func (m DynamicAny) String() string {
    if m.Message == nil {
        return "<nil>"
    }
    return m.Message.String()
}
func (m DynamicAny) Reset() {
    if m.Message == nil {
        return
    }
    m.Message.Reset()
}
func (m DynamicAny) ProtoMessage() {
    return
}
func (m DynamicAny) ProtoReflect() protoreflect.Message {
    if m.Message == nil {
        return nil
    }
    return dynamicAny{proto.MessageReflect(m.Message)}
}

type dynamicAny struct{ protoreflect.Message }

func (m dynamicAny) Type() protoreflect.MessageType {
    return dynamicAnyType{m.Message.Type()}
}
func (m dynamicAny) New() protoreflect.Message {
    return dynamicAnyType{m.Message.Type()}.New()
}
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
    return DynamicAny{proto.MessageV1(m.Message.Interface())}
}

type dynamicAnyType struct{ protoreflect.MessageType }

func (t dynamicAnyType) New() protoreflect.Message {
    return dynamicAny{t.MessageType.New()}
}
func (t dynamicAnyType) Zero() protoreflect.Message {
    return dynamicAny{t.MessageType.Zero()}
}

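A short, hedged round-trip sketch for the deprecated Any helpers above (not part of the diff); the Duration message and the DurationProto helper from the vendored duration.go further below are assumed purely as an example payload.

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/ptypes"
    durationpb "github.com/golang/protobuf/ptypes/duration"
)

func anyRoundTrip() error {
    // Pack a Duration message into an Any, then unpack it again.
    a, err := ptypes.MarshalAny(ptypes.DurationProto(3 * time.Second))
    if err != nil {
        return err
    }

    var d durationpb.Duration
    if !ptypes.Is(a, &d) {
        return fmt.Errorf("unexpected type URL %q", a.TypeUrl)
    }
    return ptypes.UnmarshalAny(a, &d)
}
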
62 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go generated vendored Normal file
@@ -0,0 +1,62 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto

package any

import (
    protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    anypb "google.golang.org/protobuf/types/known/anypb"
    reflect "reflect"
)

// Symbols defined in public import of google/protobuf/any.proto.

type Any = anypb.Any

var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor

var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
    0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
    0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
    0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
    0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
    0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
    0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
    0x74, 0x6f, 0x33,
}

var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
    0, // [0:0] is the sub-list for method output_type
    0, // [0:0] is the sub-list for method input_type
    0, // [0:0] is the sub-list for extension type_name
    0, // [0:0] is the sub-list for extension extendee
    0, // [0:0] is the sub-list for field type_name
}

func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
    if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
        return
    }
    type x struct{}
    out := protoimpl.TypeBuilder{
        File: protoimpl.DescBuilder{
            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
            RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
            NumEnums:      0,
            NumMessages:   0,
            NumExtensions: 0,
            NumServices:   0,
        },
        GoTypes:           file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
        DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
    }.Build()
    File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
    file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
    file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
    file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}

10
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
Normal file
10
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ptypes provides functionality for interacting with well-known types.
//
// Deprecated: Well-known types have specialized functionality directly
// injected into the generated packages for each message type.
// See the deprecation notice for each function for the suggested alternative.
package ptypes
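As the deprecation notice above says, the well-known-type helpers now live on the generated packages themselves. A minimal sketch of the suggested replacements (illustrative only, not part of this change):

package main

import (
    "fmt"
    "time"

    "google.golang.org/protobuf/types/known/durationpb"
    "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
    // timestamppb.Now replaces ptypes.TimestampNow.
    fmt.Println(timestamppb.Now().AsTime())
    // durationpb.New + AsDuration replace ptypes.DurationProto + ptypes.Duration.
    fmt.Println(durationpb.New(5 * time.Second).AsDuration()) // 5s
}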
76
vendor/github.com/golang/protobuf/ptypes/duration.go
generated
vendored
Normal file
|
@ -0,0 +1,76 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
durationpb "github.com/golang/protobuf/ptypes/duration"
|
||||
)
|
||||
|
||||
// Range of google.protobuf.Duration as specified in duration.proto.
|
||||
// This is about 10,000 years in seconds.
|
||||
const (
|
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||
minSeconds = -maxSeconds
|
||||
)
|
||||
|
||||
// Duration converts a durationpb.Duration to a time.Duration.
|
||||
// Duration returns an error if dur is invalid or overflows a time.Duration.
|
||||
//
|
||||
// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
|
||||
func Duration(dur *durationpb.Duration) (time.Duration, error) {
|
||||
if err := validateDuration(dur); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d := time.Duration(dur.Seconds) * time.Second
|
||||
if int64(d/time.Second) != dur.Seconds {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||
}
|
||||
if dur.Nanos != 0 {
|
||||
d += time.Duration(dur.Nanos) * time.Nanosecond
|
||||
if (d < 0) != (dur.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DurationProto converts a time.Duration to a durationpb.Duration.
|
||||
//
|
||||
// Deprecated: Call the durationpb.New function instead.
|
||||
func DurationProto(d time.Duration) *durationpb.Duration {
|
||||
nanos := d.Nanoseconds()
|
||||
secs := nanos / 1e9
|
||||
nanos -= secs * 1e9
|
||||
return &durationpb.Duration{
|
||||
Seconds: int64(secs),
|
||||
Nanos: int32(nanos),
|
||||
}
|
||||
}
|
||||
|
||||
// validateDuration determines whether the durationpb.Duration is valid
|
||||
// according to the definition in google/protobuf/duration.proto.
|
||||
// A valid durationpb.Duration may still be too large to fit into a time.Duration.
|
||||
// Note that the range of durationpb.Duration is about 10,000 years,
|
||||
// while the range of time.Duration is about 290 years.
|
||||
func validateDuration(dur *durationpb.Duration) error {
|
||||
if dur == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %v: seconds out of range", dur)
|
||||
}
|
||||
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %v: nanos out of range", dur)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
|
||||
}
|
||||
return nil
|
||||
}
|
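A minimal sketch of the round trip implemented above, together with the replacement the deprecation notices point to; the 1.5s value is illustrative:

package main

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/ptypes"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    // Deprecated helpers from the vendored file above.
    pb := ptypes.DurationProto(1500 * time.Millisecond) // Seconds: 1, Nanos: 500000000
    d, err := ptypes.Duration(pb)
    fmt.Println(d, err) // 1.5s <nil>

    // Suggested replacement.
    fmt.Println(durationpb.New(1500 * time.Millisecond).AsDuration()) // 1.5s
}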
63
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,63 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
|
||||
|
||||
package duration
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/duration.proto.
|
||||
|
||||
type Duration = durationpb.Duration
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
|
||||
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
||||
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
|
||||
}
|
112
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
Normal file
|
@ -0,0 +1,112 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
// Range of google.protobuf.Duration as specified in timestamp.proto.
|
||||
const (
|
||||
// Seconds field of the earliest valid Timestamp.
|
||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
minValidSeconds = -62135596800
|
||||
// Seconds field just after the latest valid Timestamp.
|
||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
maxValidSeconds = 253402300800
|
||||
)
|
||||
|
||||
// Timestamp converts a timestamppb.Timestamp to a time.Time.
|
||||
// It returns an error if the argument is invalid.
|
||||
//
|
||||
// Unlike most Go functions, if Timestamp returns an error, the first return
|
||||
// value is not the zero time.Time. Instead, it is the value obtained from the
|
||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||
// do map to valid time.Times.
|
||||
//
|
||||
// A nil Timestamp returns an error. The first return value in that case is
|
||||
// undefined.
|
||||
//
|
||||
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
|
||||
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
|
||||
// Don't return the zero value on error, because it corresponds to a valid
|
||||
// timestamp. Instead return whatever time.Unix gives us.
|
||||
var t time.Time
|
||||
if ts == nil {
|
||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||
} else {
|
||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||
}
|
||||
return t, validateTimestamp(ts)
|
||||
}
|
||||
|
||||
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
||||
//
|
||||
// Deprecated: Call the timestamppb.Now function instead.
|
||||
func TimestampNow() *timestamppb.Timestamp {
|
||||
ts, err := TimestampProto(time.Now())
|
||||
if err != nil {
|
||||
panic("ptypes: time.Now() out of Timestamp range")
|
||||
}
|
||||
return ts
|
||||
}
|
||||
|
||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
//
|
||||
// Deprecated: Call the timestamppb.New function instead.
|
||||
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
|
||||
ts := &timestamppb.Timestamp{
|
||||
Seconds: t.Unix(),
|
||||
Nanos: int32(t.Nanosecond()),
|
||||
}
|
||||
if err := validateTimestamp(ts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// TimestampString returns the RFC 3339 string for valid Timestamps.
|
||||
// For invalid Timestamps, it returns an error message in parentheses.
|
||||
//
|
||||
// Deprecated: Call the ts.AsTime method instead,
|
||||
// followed by a call to the Format method on the time.Time value.
|
||||
func TimestampString(ts *timestamppb.Timestamp) string {
|
||||
t, err := Timestamp(ts)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("(%v)", err)
|
||||
}
|
||||
return t.Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
// validateTimestamp determines whether a Timestamp is valid.
|
||||
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
|
||||
// and has a Nanos field in the range [0, 1e9).
|
||||
//
|
||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||
// Otherwise, it returns an error that describes the problem.
|
||||
//
|
||||
// Every valid Timestamp can be represented by a time.Time,
|
||||
// but the converse is not true.
|
||||
func validateTimestamp(ts *timestamppb.Timestamp) error {
|
||||
if ts == nil {
|
||||
return errors.New("timestamp: nil Timestamp")
|
||||
}
|
||||
if ts.Seconds < minValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
|
||||
}
|
||||
if ts.Seconds >= maxValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
|
||||
}
|
||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
|
||||
}
|
||||
return nil
|
||||
}
|
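A short sketch of the conversion and validation behaviour above; the date is illustrative. Valid Timestamps cover [0001-01-01, 10000-01-01) with Nanos in [0, 1e9):

package main

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/ptypes"
)

func main() {
    ts, err := ptypes.TimestampProto(time.Date(2023, 3, 1, 12, 0, 0, 0, time.UTC))
    if err != nil {
        panic(err) // only out-of-range times fail validation
    }
    t, err := ptypes.Timestamp(ts)
    fmt.Println(t, err)                     // 2023-03-01 12:00:00 +0000 UTC <nil>
    fmt.Println(ptypes.TimestampString(ts)) // RFC 3339 form, e.g. 2023-03-01T12:00:00Z
}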
64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
|
||||
package timestamp
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
||||
|
||||
type Timestamp = timestamppb.Timestamp
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
|
||||
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
|
||||
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
|
||||
}
|
27
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2015, Gengo, Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of Gengo, Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
35
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
go_library(
|
||||
name = "httprule",
|
||||
srcs = [
|
||||
"compile.go",
|
||||
"parse.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
|
||||
deps = ["//utilities"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "httprule_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"compile_test.go",
|
||||
"parse_test.go",
|
||||
"types_test.go",
|
||||
],
|
||||
embed = [":httprule"],
|
||||
deps = [
|
||||
"//utilities",
|
||||
"@com_github_golang_glog//:glog",
|
||||
],
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "go_default_library",
|
||||
actual = ":httprule",
|
||||
visibility = ["//:__subpackages__"],
|
||||
)
|
121
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
|||
package httprule
|
||||
|
||||
import (
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
)
|
||||
|
||||
const (
|
||||
opcodeVersion = 1
|
||||
)
|
||||
|
||||
// Template is a compiled representation of path templates.
|
||||
type Template struct {
|
||||
// Version is the version number of the format.
|
||||
Version int
|
||||
// OpCodes is a sequence of operations.
|
||||
OpCodes []int
|
||||
// Pool is a constant pool
|
||||
Pool []string
|
||||
// Verb is a VERB part in the template.
|
||||
Verb string
|
||||
// Fields is a list of field paths bound in this template.
|
||||
Fields []string
|
||||
// Original template (example: /v1/a_bit_of_everything)
|
||||
Template string
|
||||
}
|
||||
|
||||
// Compiler compiles utilities representation of path templates into marshallable operations.
|
||||
// They can be unmarshalled by runtime.NewPattern.
|
||||
type Compiler interface {
|
||||
Compile() Template
|
||||
}
|
||||
|
||||
type op struct {
|
||||
// code is the opcode of the operation
|
||||
code utilities.OpCode
|
||||
|
||||
// str is a string operand of the code.
|
||||
// num is ignored if str is not empty.
|
||||
str string
|
||||
|
||||
// num is a numeric operand of the code.
|
||||
num int
|
||||
}
|
||||
|
||||
func (w wildcard) compile() []op {
|
||||
return []op{
|
||||
{code: utilities.OpPush},
|
||||
}
|
||||
}
|
||||
|
||||
func (w deepWildcard) compile() []op {
|
||||
return []op{
|
||||
{code: utilities.OpPushM},
|
||||
}
|
||||
}
|
||||
|
||||
func (l literal) compile() []op {
|
||||
return []op{
|
||||
{
|
||||
code: utilities.OpLitPush,
|
||||
str: string(l),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (v variable) compile() []op {
|
||||
var ops []op
|
||||
for _, s := range v.segments {
|
||||
ops = append(ops, s.compile()...)
|
||||
}
|
||||
ops = append(ops, op{
|
||||
code: utilities.OpConcatN,
|
||||
num: len(v.segments),
|
||||
}, op{
|
||||
code: utilities.OpCapture,
|
||||
str: v.path,
|
||||
})
|
||||
|
||||
return ops
|
||||
}
|
||||
|
||||
func (t template) Compile() Template {
|
||||
var rawOps []op
|
||||
for _, s := range t.segments {
|
||||
rawOps = append(rawOps, s.compile()...)
|
||||
}
|
||||
|
||||
var (
|
||||
ops []int
|
||||
pool []string
|
||||
fields []string
|
||||
)
|
||||
consts := make(map[string]int)
|
||||
for _, op := range rawOps {
|
||||
ops = append(ops, int(op.code))
|
||||
if op.str == "" {
|
||||
ops = append(ops, op.num)
|
||||
} else {
|
||||
// eof segment literal represents the "/" path pattern
|
||||
if op.str == eof {
|
||||
op.str = ""
|
||||
}
|
||||
if _, ok := consts[op.str]; !ok {
|
||||
consts[op.str] = len(pool)
|
||||
pool = append(pool, op.str)
|
||||
}
|
||||
ops = append(ops, consts[op.str])
|
||||
}
|
||||
if op.code == utilities.OpCapture {
|
||||
fields = append(fields, op.str)
|
||||
}
|
||||
}
|
||||
return Template{
|
||||
Version: opcodeVersion,
|
||||
OpCodes: ops,
|
||||
Pool: pool,
|
||||
Verb: t.verb,
|
||||
Fields: fields,
|
||||
Template: t.template,
|
||||
}
|
||||
}
|
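A hedged in-package sketch (it would have to live inside the httprule package, since the segment types are unexported) showing how a single literal segment compiles to one OpLitPush op; the "v1" value is illustrative:

package httprule

import (
    "testing"

    "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func TestLiteralCompileSketch(t *testing.T) {
    // literal("v1").compile() should yield exactly one op: OpLitPush carrying "v1".
    ops := literal("v1").compile()
    if len(ops) != 1 || ops[0].code != utilities.OpLitPush || ops[0].str != "v1" {
        t.Fatalf("unexpected ops: %+v", ops)
    }
}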
11
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
// +build gofuzz

package httprule

func Fuzz(data []byte) int {
    _, err := Parse(string(data))
    if err != nil {
        return 0
    }
    return 0
}
368
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
generated
vendored
Normal file
|
@ -0,0 +1,368 @@
|
|||
package httprule
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// InvalidTemplateError indicates that the path template is not valid.
|
||||
type InvalidTemplateError struct {
|
||||
tmpl string
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e InvalidTemplateError) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
|
||||
}
|
||||
|
||||
// Parse parses the string representation of path template
|
||||
func Parse(tmpl string) (Compiler, error) {
|
||||
if !strings.HasPrefix(tmpl, "/") {
|
||||
return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
|
||||
}
|
||||
tokens, verb := tokenize(tmpl[1:])
|
||||
|
||||
p := parser{tokens: tokens}
|
||||
segs, err := p.topLevelSegments()
|
||||
if err != nil {
|
||||
return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
|
||||
}
|
||||
|
||||
return template{
|
||||
segments: segs,
|
||||
verb: verb,
|
||||
template: tmpl,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func tokenize(path string) (tokens []string, verb string) {
|
||||
if path == "" {
|
||||
return []string{eof}, ""
|
||||
}
|
||||
|
||||
const (
|
||||
init = iota
|
||||
field
|
||||
nested
|
||||
)
|
||||
st := init
|
||||
for path != "" {
|
||||
var idx int
|
||||
switch st {
|
||||
case init:
|
||||
idx = strings.IndexAny(path, "/{")
|
||||
case field:
|
||||
idx = strings.IndexAny(path, ".=}")
|
||||
case nested:
|
||||
idx = strings.IndexAny(path, "/}")
|
||||
}
|
||||
if idx < 0 {
|
||||
tokens = append(tokens, path)
|
||||
break
|
||||
}
|
||||
switch r := path[idx]; r {
|
||||
case '/', '.':
|
||||
case '{':
|
||||
st = field
|
||||
case '=':
|
||||
st = nested
|
||||
case '}':
|
||||
st = init
|
||||
}
|
||||
if idx == 0 {
|
||||
tokens = append(tokens, path[idx:idx+1])
|
||||
} else {
|
||||
tokens = append(tokens, path[:idx], path[idx:idx+1])
|
||||
}
|
||||
path = path[idx+1:]
|
||||
}
|
||||
|
||||
l := len(tokens)
|
||||
// See
|
||||
// https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
|
||||
// although normal and backwards-compat logic here is to use the last index
|
||||
// of a colon, if the final segment is a variable followed by a colon, the
|
||||
// part following the colon must be a verb. Hence if the previous token is
|
||||
// an end var marker, we switch the index we're looking for to Index instead
|
||||
// of LastIndex, so that we correctly grab the remaining part of the path as
|
||||
// the verb.
|
||||
var penultimateTokenIsEndVar bool
|
||||
switch l {
|
||||
case 0, 1:
|
||||
// Not enough to be variable so skip this logic and don't result in an
|
||||
// invalid index
|
||||
default:
|
||||
penultimateTokenIsEndVar = tokens[l-2] == "}"
|
||||
}
|
||||
t := tokens[l-1]
|
||||
var idx int
|
||||
if penultimateTokenIsEndVar {
|
||||
idx = strings.Index(t, ":")
|
||||
} else {
|
||||
idx = strings.LastIndex(t, ":")
|
||||
}
|
||||
if idx == 0 {
|
||||
tokens, verb = tokens[:l-1], t[1:]
|
||||
} else if idx > 0 {
|
||||
tokens[l-1], verb = t[:idx], t[idx+1:]
|
||||
}
|
||||
tokens = append(tokens, eof)
|
||||
return tokens, verb
|
||||
}
|
||||
|
||||
// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
|
||||
type parser struct {
|
||||
tokens []string
|
||||
accepted []string
|
||||
}
|
||||
|
||||
// topLevelSegments is the target of this parser.
|
||||
func (p *parser) topLevelSegments() ([]segment, error) {
|
||||
if _, err := p.accept(typeEOF); err == nil {
|
||||
p.tokens = p.tokens[:0]
|
||||
return []segment{literal(eof)}, nil
|
||||
}
|
||||
segs, err := p.segments()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := p.accept(typeEOF); err != nil {
|
||||
return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
|
||||
}
|
||||
return segs, nil
|
||||
}
|
||||
|
||||
func (p *parser) segments() ([]segment, error) {
|
||||
s, err := p.segment()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
segs := []segment{s}
|
||||
for {
|
||||
if _, err := p.accept("/"); err != nil {
|
||||
return segs, nil
|
||||
}
|
||||
s, err := p.segment()
|
||||
if err != nil {
|
||||
return segs, err
|
||||
}
|
||||
segs = append(segs, s)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) segment() (segment, error) {
|
||||
if _, err := p.accept("*"); err == nil {
|
||||
return wildcard{}, nil
|
||||
}
|
||||
if _, err := p.accept("**"); err == nil {
|
||||
return deepWildcard{}, nil
|
||||
}
|
||||
if l, err := p.literal(); err == nil {
|
||||
return l, nil
|
||||
}
|
||||
|
||||
v, err := p.variable()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
|
||||
}
|
||||
return v, err
|
||||
}
|
||||
|
||||
func (p *parser) literal() (segment, error) {
|
||||
lit, err := p.accept(typeLiteral)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return literal(lit), nil
|
||||
}
|
||||
|
||||
func (p *parser) variable() (segment, error) {
|
||||
if _, err := p.accept("{"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
path, err := p.fieldPath()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var segs []segment
|
||||
if _, err := p.accept("="); err == nil {
|
||||
segs, err = p.segments()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
|
||||
}
|
||||
} else {
|
||||
segs = []segment{wildcard{}}
|
||||
}
|
||||
|
||||
if _, err := p.accept("}"); err != nil {
|
||||
return nil, fmt.Errorf("unterminated variable segment: %s", path)
|
||||
}
|
||||
return variable{
|
||||
path: path,
|
||||
segments: segs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *parser) fieldPath() (string, error) {
|
||||
c, err := p.accept(typeIdent)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
components := []string{c}
|
||||
for {
|
||||
if _, err = p.accept("."); err != nil {
|
||||
return strings.Join(components, "."), nil
|
||||
}
|
||||
c, err := p.accept(typeIdent)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid field path component: %v", err)
|
||||
}
|
||||
components = append(components, c)
|
||||
}
|
||||
}
|
||||
|
||||
// A termType is a type of terminal symbols.
|
||||
type termType string
|
||||
|
||||
// These constants define some of valid values of termType.
|
||||
// They improve readability of parse functions.
|
||||
//
|
||||
// You can also use "/", "*", "**", "." or "=" as valid values.
|
||||
const (
|
||||
typeIdent = termType("ident")
|
||||
typeLiteral = termType("literal")
|
||||
typeEOF = termType("$")
|
||||
)
|
||||
|
||||
const (
|
||||
// eof is the terminal symbol which always appears at the end of token sequence.
|
||||
eof = "\u0000"
|
||||
)
|
||||
|
||||
// accept tries to accept a token in "p".
|
||||
// This function consumes a token and returns it if it matches to the specified "term".
|
||||
// If it doesn't match, the function does not consume any tokens and return an error.
|
||||
func (p *parser) accept(term termType) (string, error) {
|
||||
t := p.tokens[0]
|
||||
switch term {
|
||||
case "/", "*", "**", ".", "=", "{", "}":
|
||||
if t != string(term) && t != "/" {
|
||||
return "", fmt.Errorf("expected %q but got %q", term, t)
|
||||
}
|
||||
case typeEOF:
|
||||
if t != eof {
|
||||
return "", fmt.Errorf("expected EOF but got %q", t)
|
||||
}
|
||||
case typeIdent:
|
||||
if err := expectIdent(t); err != nil {
|
||||
return "", err
|
||||
}
|
||||
case typeLiteral:
|
||||
if err := expectPChars(t); err != nil {
|
||||
return "", err
|
||||
}
|
||||
default:
|
||||
return "", fmt.Errorf("unknown termType %q", term)
|
||||
}
|
||||
p.tokens = p.tokens[1:]
|
||||
p.accepted = append(p.accepted, t)
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
|
||||
//
|
||||
// https://www.ietf.org/rfc/rfc3986.txt, P.49
|
||||
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||
// / "*" / "+" / "," / ";" / "="
|
||||
// pct-encoded = "%" HEXDIG HEXDIG
|
||||
func expectPChars(t string) error {
|
||||
const (
|
||||
init = iota
|
||||
pct1
|
||||
pct2
|
||||
)
|
||||
st := init
|
||||
for _, r := range t {
|
||||
if st != init {
|
||||
if !isHexDigit(r) {
|
||||
return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
|
||||
}
|
||||
switch st {
|
||||
case pct1:
|
||||
st = pct2
|
||||
case pct2:
|
||||
st = init
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// unreserved
|
||||
switch {
|
||||
case 'A' <= r && r <= 'Z':
|
||||
continue
|
||||
case 'a' <= r && r <= 'z':
|
||||
continue
|
||||
case '0' <= r && r <= '9':
|
||||
continue
|
||||
}
|
||||
switch r {
|
||||
case '-', '.', '_', '~':
|
||||
// unreserved
|
||||
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
|
||||
// sub-delims
|
||||
case ':', '@':
|
||||
// rest of pchar
|
||||
case '%':
|
||||
// pct-encoded
|
||||
st = pct1
|
||||
default:
|
||||
return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
|
||||
}
|
||||
}
|
||||
if st != init {
|
||||
return fmt.Errorf("invalid percent-encoding in %q", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
|
||||
func expectIdent(ident string) error {
|
||||
if ident == "" {
|
||||
return fmt.Errorf("empty identifier")
|
||||
}
|
||||
for pos, r := range ident {
|
||||
switch {
|
||||
case '0' <= r && r <= '9':
|
||||
if pos == 0 {
|
||||
return fmt.Errorf("identifier starting with digit: %s", ident)
|
||||
}
|
||||
continue
|
||||
case 'A' <= r && r <= 'Z':
|
||||
continue
|
||||
case 'a' <= r && r <= 'z':
|
||||
continue
|
||||
case r == '_':
|
||||
continue
|
||||
default:
|
||||
return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isHexDigit(r rune) bool {
|
||||
switch {
|
||||
case '0' <= r && r <= '9':
|
||||
return true
|
||||
case 'A' <= r && r <= 'F':
|
||||
return true
|
||||
case 'a' <= r && r <= 'f':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
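An illustrative sketch of the parse-then-compile flow defined above. Note that internal/httprule can only be imported from inside grpc-gateway itself; the template string and expected outputs are examples, not anything used by GoToSocial:

package main

import (
    "fmt"

    "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
)

func main() {
    // "/v1/{name=projects/*}:get" tokenizes into literals, one variable with a
    // nested wildcard, and the trailing verb "get".
    c, err := httprule.Parse("/v1/{name=projects/*}:get")
    if err != nil {
        panic(err)
    }
    tmpl := c.Compile()
    fmt.Println(tmpl.Verb)   // get
    fmt.Println(tmpl.Fields) // [name]
    fmt.Println(tmpl.Pool)   // constant pool of literal strings, e.g. [v1 projects name]
}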
60
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
generated
vendored
Normal file
|
@ -0,0 +1,60 @@
|
|||
package httprule
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type template struct {
|
||||
segments []segment
|
||||
verb string
|
||||
template string
|
||||
}
|
||||
|
||||
type segment interface {
|
||||
fmt.Stringer
|
||||
compile() (ops []op)
|
||||
}
|
||||
|
||||
type wildcard struct{}
|
||||
|
||||
type deepWildcard struct{}
|
||||
|
||||
type literal string
|
||||
|
||||
type variable struct {
|
||||
path string
|
||||
segments []segment
|
||||
}
|
||||
|
||||
func (wildcard) String() string {
|
||||
return "*"
|
||||
}
|
||||
|
||||
func (deepWildcard) String() string {
|
||||
return "**"
|
||||
}
|
||||
|
||||
func (l literal) String() string {
|
||||
return string(l)
|
||||
}
|
||||
|
||||
func (v variable) String() string {
|
||||
var segs []string
|
||||
for _, s := range v.segments {
|
||||
segs = append(segs, s.String())
|
||||
}
|
||||
return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
|
||||
}
|
||||
|
||||
func (t template) String() string {
|
||||
var segs []string
|
||||
for _, s := range t.segments {
|
||||
segs = append(segs, s.String())
|
||||
}
|
||||
str := strings.Join(segs, "/")
|
||||
if t.verb != "" {
|
||||
str = fmt.Sprintf("%s:%s", str, t.verb)
|
||||
}
|
||||
return "/" + str
|
||||
}
|
91
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
go_library(
|
||||
name = "runtime",
|
||||
srcs = [
|
||||
"context.go",
|
||||
"convert.go",
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"fieldmask.go",
|
||||
"handler.go",
|
||||
"marshal_httpbodyproto.go",
|
||||
"marshal_json.go",
|
||||
"marshal_jsonpb.go",
|
||||
"marshal_proto.go",
|
||||
"marshaler.go",
|
||||
"marshaler_registry.go",
|
||||
"mux.go",
|
||||
"pattern.go",
|
||||
"proto2_convert.go",
|
||||
"query.go",
|
||||
],
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
|
||||
deps = [
|
||||
"//internal/httprule",
|
||||
"//utilities",
|
||||
"@go_googleapis//google/api:httpbody_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||
"@org_golang_google_grpc//codes",
|
||||
"@org_golang_google_grpc//grpclog",
|
||||
"@org_golang_google_grpc//metadata",
|
||||
"@org_golang_google_grpc//status",
|
||||
"@org_golang_google_protobuf//encoding/protojson",
|
||||
"@org_golang_google_protobuf//proto",
|
||||
"@org_golang_google_protobuf//reflect/protoreflect",
|
||||
"@org_golang_google_protobuf//reflect/protoregistry",
|
||||
"@org_golang_google_protobuf//types/known/durationpb",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb",
|
||||
"@org_golang_google_protobuf//types/known/wrapperspb",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "runtime_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"context_test.go",
|
||||
"convert_test.go",
|
||||
"errors_test.go",
|
||||
"fieldmask_test.go",
|
||||
"handler_test.go",
|
||||
"marshal_httpbodyproto_test.go",
|
||||
"marshal_json_test.go",
|
||||
"marshal_jsonpb_test.go",
|
||||
"marshal_proto_test.go",
|
||||
"marshaler_registry_test.go",
|
||||
"mux_test.go",
|
||||
"pattern_test.go",
|
||||
"query_test.go",
|
||||
],
|
||||
embed = [":runtime"],
|
||||
deps = [
|
||||
"//runtime/internal/examplepb",
|
||||
"//utilities",
|
||||
"@com_github_google_go_cmp//cmp",
|
||||
"@com_github_google_go_cmp//cmp/cmpopts",
|
||||
"@go_googleapis//google/api:httpbody_go_proto",
|
||||
"@go_googleapis//google/rpc:errdetails_go_proto",
|
||||
"@go_googleapis//google/rpc:status_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||
"@org_golang_google_grpc//codes",
|
||||
"@org_golang_google_grpc//metadata",
|
||||
"@org_golang_google_grpc//status",
|
||||
"@org_golang_google_protobuf//encoding/protojson",
|
||||
"@org_golang_google_protobuf//proto",
|
||||
"@org_golang_google_protobuf//testing/protocmp",
|
||||
"@org_golang_google_protobuf//types/known/durationpb",
|
||||
"@org_golang_google_protobuf//types/known/emptypb",
|
||||
"@org_golang_google_protobuf//types/known/structpb",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb",
|
||||
"@org_golang_google_protobuf//types/known/wrapperspb",
|
||||
],
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "go_default_library",
|
||||
actual = ":runtime",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
345
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
generated
vendored
Normal file
|
@ -0,0 +1,345 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// MetadataHeaderPrefix is the http prefix that represents custom metadata
|
||||
// parameters to or from a gRPC call.
|
||||
const MetadataHeaderPrefix = "Grpc-Metadata-"
|
||||
|
||||
// MetadataPrefix is prepended to permanent HTTP header keys (as specified
|
||||
// by the IANA) when added to the gRPC context.
|
||||
const MetadataPrefix = "grpcgateway-"
|
||||
|
||||
// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
|
||||
// HTTP headers in a response handled by grpc-gateway
|
||||
const MetadataTrailerPrefix = "Grpc-Trailer-"
|
||||
|
||||
const metadataGrpcTimeout = "Grpc-Timeout"
|
||||
const metadataHeaderBinarySuffix = "-Bin"
|
||||
|
||||
const xForwardedFor = "X-Forwarded-For"
|
||||
const xForwardedHost = "X-Forwarded-Host"
|
||||
|
||||
var (
|
||||
// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
|
||||
// header isn't present. If the value is 0 the sent `context` will not have a timeout.
|
||||
DefaultContextTimeout = 0 * time.Second
|
||||
)
|
||||
|
||||
type (
|
||||
rpcMethodKey struct{}
|
||||
httpPathPatternKey struct{}
|
||||
|
||||
AnnotateContextOption func(ctx context.Context) context.Context
|
||||
)
|
||||
|
||||
func WithHTTPPathPattern(pattern string) AnnotateContextOption {
|
||||
return func(ctx context.Context) context.Context {
|
||||
return withHTTPPathPattern(ctx, pattern)
|
||||
}
|
||||
}
|
||||
|
||||
func decodeBinHeader(v string) ([]byte, error) {
|
||||
if len(v)%4 == 0 {
|
||||
// Input was padded, or padding was not necessary.
|
||||
return base64.StdEncoding.DecodeString(v)
|
||||
}
|
||||
return base64.RawStdEncoding.DecodeString(v)
|
||||
}
|
||||
|
||||
/*
|
||||
AnnotateContext adds context information such as metadata from the request.
|
||||
|
||||
At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
|
||||
except that the forwarded destination is not another HTTP service but rather
|
||||
a gRPC service.
|
||||
*/
|
||||
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
|
||||
ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if md == nil {
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
return metadata.NewOutgoingContext(ctx, md), nil
|
||||
}
|
||||
|
||||
// AnnotateIncomingContext adds context information such as metadata from the request.
|
||||
// Attach metadata as incoming context.
|
||||
func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
|
||||
ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if md == nil {
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
return metadata.NewIncomingContext(ctx, md), nil
|
||||
}
|
||||
|
||||
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
|
||||
ctx = withRPCMethod(ctx, rpcMethodName)
|
||||
for _, o := range options {
|
||||
ctx = o(ctx)
|
||||
}
|
||||
var pairs []string
|
||||
timeout := DefaultContextTimeout
|
||||
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
|
||||
var err error
|
||||
timeout, err = timeoutDecode(tm)
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
|
||||
}
|
||||
}
|
||||
|
||||
for key, vals := range req.Header {
|
||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
||||
for _, val := range vals {
|
||||
// For backwards-compatibility, pass through 'authorization' header with no prefix.
|
||||
if key == "Authorization" {
|
||||
pairs = append(pairs, "authorization", val)
|
||||
}
|
||||
if h, ok := mux.incomingHeaderMatcher(key); ok {
|
||||
// Handles "-bin" metadata in grpc, since grpc will do another base64
|
||||
// encode before sending to server, we need to decode it first.
|
||||
if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
|
||||
b, err := decodeBinHeader(val)
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
|
||||
}
|
||||
|
||||
val = string(b)
|
||||
}
|
||||
pairs = append(pairs, h, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
if host := req.Header.Get(xForwardedHost); host != "" {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedHost), host)
|
||||
} else if req.Host != "" {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
|
||||
}
|
||||
|
||||
if addr := req.RemoteAddr; addr != "" {
|
||||
if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
|
||||
if fwd := req.Header.Get(xForwardedFor); fwd == "" {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
|
||||
} else {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if timeout != 0 {
|
||||
//nolint:govet // The context outlives this function
|
||||
ctx, _ = context.WithTimeout(ctx, timeout)
|
||||
}
|
||||
if len(pairs) == 0 {
|
||||
return ctx, nil, nil
|
||||
}
|
||||
md := metadata.Pairs(pairs...)
|
||||
for _, mda := range mux.metadataAnnotators {
|
||||
md = metadata.Join(md, mda(ctx, req))
|
||||
}
|
||||
return ctx, md, nil
|
||||
}
|
||||
|
||||
// ServerMetadata consists of metadata sent from gRPC server.
|
||||
type ServerMetadata struct {
|
||||
HeaderMD metadata.MD
|
||||
TrailerMD metadata.MD
|
||||
}
|
||||
|
||||
type serverMetadataKey struct{}
|
||||
|
||||
// NewServerMetadataContext creates a new context with ServerMetadata
|
||||
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
|
||||
return context.WithValue(ctx, serverMetadataKey{}, md)
|
||||
}
|
||||
|
||||
// ServerMetadataFromContext returns the ServerMetadata in ctx
|
||||
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
|
||||
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
|
||||
return
|
||||
}
|
||||
|
||||
// ServerTransportStream implements grpc.ServerTransportStream.
|
||||
// It should only be used by the generated files to support grpc.SendHeader
|
||||
// outside of gRPC server use.
|
||||
type ServerTransportStream struct {
|
||||
mu sync.Mutex
|
||||
header metadata.MD
|
||||
trailer metadata.MD
|
||||
}
|
||||
|
||||
// Method returns the method for the stream.
|
||||
func (s *ServerTransportStream) Method() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Header returns the header metadata of the stream.
|
||||
func (s *ServerTransportStream) Header() metadata.MD {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.header.Copy()
|
||||
}
|
||||
|
||||
// SetHeader sets the header metadata.
|
||||
func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.header = metadata.Join(s.header, md)
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendHeader sets the header metadata.
|
||||
func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
|
||||
return s.SetHeader(md)
|
||||
}
|
||||
|
||||
// Trailer returns the cached trailer metadata.
|
||||
func (s *ServerTransportStream) Trailer() metadata.MD {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.trailer.Copy()
|
||||
}
|
||||
|
||||
// SetTrailer sets the trailer metadata.
|
||||
func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.trailer = metadata.Join(s.trailer, md)
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func timeoutDecode(s string) (time.Duration, error) {
|
||||
size := len(s)
|
||||
if size < 2 {
|
||||
return 0, fmt.Errorf("timeout string is too short: %q", s)
|
||||
}
|
||||
d, ok := timeoutUnitToDuration(s[size-1])
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
|
||||
}
|
||||
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return d * time.Duration(t), nil
|
||||
}
|
||||
|
||||
func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
|
||||
switch u {
|
||||
case 'H':
|
||||
return time.Hour, true
|
||||
case 'M':
|
||||
return time.Minute, true
|
||||
case 'S':
|
||||
return time.Second, true
|
||||
case 'm':
|
||||
return time.Millisecond, true
|
||||
case 'u':
|
||||
return time.Microsecond, true
|
||||
case 'n':
|
||||
return time.Nanosecond, true
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// isPermanentHTTPHeader checks whether hdr belongs to the list of
|
||||
// permanent request headers maintained by IANA.
|
||||
// http://www.iana.org/assignments/message-headers/message-headers.xml
|
||||
func isPermanentHTTPHeader(hdr string) bool {
|
||||
switch hdr {
|
||||
case
|
||||
"Accept",
|
||||
"Accept-Charset",
|
||||
"Accept-Language",
|
||||
"Accept-Ranges",
|
||||
"Authorization",
|
||||
"Cache-Control",
|
||||
"Content-Type",
|
||||
"Cookie",
|
||||
"Date",
|
||||
"Expect",
|
||||
"From",
|
||||
"Host",
|
||||
"If-Match",
|
||||
"If-Modified-Since",
|
||||
"If-None-Match",
|
||||
"If-Schedule-Tag-Match",
|
||||
"If-Unmodified-Since",
|
||||
"Max-Forwards",
|
||||
"Origin",
|
||||
"Pragma",
|
||||
"Referer",
|
||||
"User-Agent",
|
||||
"Via",
|
||||
"Warning":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RPCMethod returns the method string for the server context. The returned
|
||||
// string is in the format of "/package.service/method".
|
||||
func RPCMethod(ctx context.Context) (string, bool) {
|
||||
m := ctx.Value(rpcMethodKey{})
|
||||
if m == nil {
|
||||
return "", false
|
||||
}
|
||||
ms, ok := m.(string)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
return ms, true
|
||||
}
|
||||
|
||||
func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
|
||||
return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
|
||||
}
|
||||
|
||||
// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
|
||||
// The format of the returned string is defined by the google.api.http path template type.
|
||||
func HTTPPathPattern(ctx context.Context) (string, bool) {
|
||||
m := ctx.Value(httpPathPatternKey{})
|
||||
if m == nil {
|
||||
return "", false
|
||||
}
|
||||
ms, ok := m.(string)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
return ms, true
|
||||
}
|
||||
|
||||
func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
|
||||
return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
|
||||
}
|
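A hedged sketch of how a caller might use AnnotateContext from the file above to turn inbound HTTP headers into outgoing gRPC metadata; the mux, method name, and header values are illustrative:

package main

import (
    "log"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/grpc/metadata"
)

func main() {
    mux := runtime.NewServeMux()
    h := func(w http.ResponseWriter, r *http.Request) {
        // A "Grpc-Timeout: 100m" header becomes a 100ms context deadline and
        // "Grpc-Metadata-Foo: bar" surfaces as metadata key "foo".
        ctx, err := runtime.AnnotateContext(r.Context(), mux, r, "/pkg.Service/Method")
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        md, _ := metadata.FromOutgoingContext(ctx)
        log.Println(md)
    }
    log.Fatal(http.ListenAndServe(":8080", http.HandlerFunc(h)))
}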
322
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
generated
vendored
Normal file
|
@ -0,0 +1,322 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
"google.golang.org/protobuf/types/known/wrapperspb"
|
||||
)
|
||||
|
||||
// String just returns the given string.
|
||||
// It is just for compatibility to other types.
|
||||
func String(val string) (string, error) {
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// StringSlice converts 'val' where individual strings are separated by
|
||||
// 'sep' into a string slice.
|
||||
func StringSlice(val, sep string) ([]string, error) {
|
||||
return strings.Split(val, sep), nil
|
||||
}
|
||||
|
||||
// Bool converts the given string representation of a boolean value into bool.
|
||||
func Bool(val string) (bool, error) {
|
||||
return strconv.ParseBool(val)
|
||||
}
|
||||
|
||||
// BoolSlice converts 'val' where individual booleans are separated by
|
||||
// 'sep' into a bool slice.
|
||||
func BoolSlice(val, sep string) ([]bool, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]bool, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Bool(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Float64 converts the given string representation of a floating point number into float64.
|
||||
func Float64(val string) (float64, error) {
|
||||
return strconv.ParseFloat(val, 64)
|
||||
}
|
||||
|
||||
// Float64Slice converts 'val' where individual floating point numbers are separated by
|
||||
// 'sep' into a float64 slice.
|
||||
func Float64Slice(val, sep string) ([]float64, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]float64, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Float64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Float32 converts the given string representation of a floating point number into float32.
|
||||
func Float32(val string) (float32, error) {
|
||||
f, err := strconv.ParseFloat(val, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return float32(f), nil
|
||||
}
|
||||
|
||||
// Float32Slice converts 'val' where individual floating point numbers are separated by
|
||||
// 'sep' into a float32 slice.
|
||||
func Float32Slice(val, sep string) ([]float32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]float32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Float32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Int64 converts the given string representation of an integer into int64.
|
||||
func Int64(val string) (int64, error) {
|
||||
return strconv.ParseInt(val, 0, 64)
|
||||
}
|
||||
|
||||
// Int64Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a int64 slice.
|
||||
func Int64Slice(val, sep string) ([]int64, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]int64, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Int64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Int32 converts the given string representation of an integer into int32.
|
||||
func Int32(val string) (int32, error) {
|
||||
i, err := strconv.ParseInt(val, 0, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int32(i), nil
|
||||
}
|
||||
|
||||
// Int32Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a int32 slice.
|
||||
func Int32Slice(val, sep string) ([]int32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]int32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Int32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Uint64 converts the given string representation of an integer into uint64.
|
||||
func Uint64(val string) (uint64, error) {
|
||||
return strconv.ParseUint(val, 0, 64)
|
||||
}
|
||||
|
||||
// Uint64Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a uint64 slice.
|
||||
func Uint64Slice(val, sep string) ([]uint64, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]uint64, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Uint64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Uint32 converts the given string representation of an integer into uint32.
|
||||
func Uint32(val string) (uint32, error) {
|
||||
i, err := strconv.ParseUint(val, 0, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint32(i), nil
|
||||
}
|
||||
|
||||
// Uint32Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a uint32 slice.
|
||||
func Uint32Slice(val, sep string) ([]uint32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]uint32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Uint32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Bytes converts the given string representation of a byte sequence into a slice of bytes
|
||||
// A bytes sequence is encoded in URL-safe base64 without padding
|
||||
func Bytes(val string) ([]byte, error) {
|
||||
b, err := base64.StdEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
b, err = base64.URLEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
|
||||
// base64 without padding, are separated by 'sep' into a slice of byte slices.
|
||||
func BytesSlice(val, sep string) ([][]byte, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([][]byte, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Bytes(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
|
||||
func Timestamp(val string) (*timestamppb.Timestamp, error) {
|
||||
var r timestamppb.Timestamp
|
||||
val = strconv.Quote(strings.Trim(val, `"`))
|
||||
unmarshaler := &protojson.UnmarshalOptions{}
|
||||
err := unmarshaler.Unmarshal([]byte(val), &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
// Duration converts the given string into a timestamp.Duration.
|
||||
func Duration(val string) (*durationpb.Duration, error) {
|
||||
var r durationpb.Duration
|
||||
val = strconv.Quote(strings.Trim(val, `"`))
|
||||
unmarshaler := &protojson.UnmarshalOptions{}
|
||||
err := unmarshaler.Unmarshal([]byte(val), &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
}
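Because both Timestamp and Duration trim any surrounding quotes and then re-quote the value before handing it to protojson, callers may pass either the raw RFC 3339 / duration string or an already-quoted one. A small sketch, with illustrative literals:

package main

import (
	"fmt"
	"log"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Unquoted RFC 3339 string.
	ts, err := runtime.Timestamp("2023-01-02T15:04:05Z")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts.AsTime())

	// Already-quoted duration string works equally well.
	d, err := runtime.Duration(`"90s"`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.AsDuration())
}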
|
||||
|
||||
// Enum converts the given string into an int32 that should be type cast into the
// correct enum proto type.
|
||||
func Enum(val string, enumValMap map[string]int32) (int32, error) {
|
||||
e, ok := enumValMap[val]
|
||||
if ok {
|
||||
return e, nil
|
||||
}
|
||||
|
||||
i, err := Int32(val)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s is not valid", val)
|
||||
}
|
||||
for _, v := range enumValMap {
|
||||
if v == i {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("%s is not valid", val)
|
||||
}
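Enum accepts either the symbolic name or the numeric value, provided the number appears in the supplied value map. The map below is a hypothetical stand-in for the <Enum>_value map that protoc generates:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Hypothetical enum value map, e.g. generated as MyEnum_value.
	enumValMap := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}

	byName, _ := runtime.Enum("ACTIVE", enumValMap) // 1, matched by name
	byNum, _ := runtime.Enum("1", enumValMap)       // 1, matched by numeric value
	_, err := runtime.Enum("2", enumValMap)         // error: 2 is not in the map

	fmt.Println(byName, byNum, err)
}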
|
||||
|
||||
// EnumSlice converts 'val' where individual enums are separated by 'sep'
|
||||
// into an int32 slice. Each individual int32 should be type cast into the
|
||||
// correct enum proto type.
|
||||
func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]int32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Enum(v, enumValMap)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Support for google.protobuf.wrappers on top of primitive types
|
||||
*/
|
||||
|
||||
// StringValue well-known type support as wrapper around string type
|
||||
func StringValue(val string) (*wrapperspb.StringValue, error) {
|
||||
return &wrapperspb.StringValue{Value: val}, nil
|
||||
}
|
||||
|
||||
// FloatValue well-known type support as wrapper around float32 type
|
||||
func FloatValue(val string) (*wrapperspb.FloatValue, error) {
|
||||
parsedVal, err := Float32(val)
|
||||
return &wrapperspb.FloatValue{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// DoubleValue well-known type support as wrapper around float64 type
|
||||
func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
|
||||
parsedVal, err := Float64(val)
|
||||
return &wrapperspb.DoubleValue{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// BoolValue well-known type support as wrapper around bool type
|
||||
func BoolValue(val string) (*wrapperspb.BoolValue, error) {
|
||||
parsedVal, err := Bool(val)
|
||||
return &wrapperspb.BoolValue{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// Int32Value well-known type support as wrapper around int32 type
|
||||
func Int32Value(val string) (*wrapperspb.Int32Value, error) {
|
||||
parsedVal, err := Int32(val)
|
||||
return &wrapperspb.Int32Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// UInt32Value well-known type support as wrapper around uint32 type
|
||||
func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
|
||||
parsedVal, err := Uint32(val)
|
||||
return &wrapperspb.UInt32Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// Int64Value well-known type support as wrapper around int64 type
|
||||
func Int64Value(val string) (*wrapperspb.Int64Value, error) {
|
||||
parsedVal, err := Int64(val)
|
||||
return &wrapperspb.Int64Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// UInt64Value well-known type support as wrapper around uint64 type
|
||||
func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
|
||||
parsedVal, err := Uint64(val)
|
||||
return &wrapperspb.UInt64Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// BytesValue well-known type support as wrapper around bytes[] type
|
||||
func BytesValue(val string) (*wrapperspb.BytesValue, error) {
|
||||
parsedVal, err := Bytes(val)
|
||||
return &wrapperspb.BytesValue{Value: parsedVal}, err
|
||||
}
|
5 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go generated vendored Normal file
@ -0,0 +1,5 @@
/*
|
||||
Package runtime contains runtime helper functions used by
|
||||
servers which protoc-gen-grpc-gateway generates.
|
||||
*/
|
||||
package runtime
|
180 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go generated vendored Normal file
@ -0,0 +1,180 @@
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ErrorHandlerFunc is the signature used to configure error handling.
|
||||
type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
|
||||
|
||||
// StreamErrorHandlerFunc is the signature used to configure stream error handling.
|
||||
type StreamErrorHandlerFunc func(context.Context, error) *status.Status
|
||||
|
||||
// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
|
||||
type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)
|
||||
|
||||
// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
|
||||
// passed to the DefaultRoutingErrorHandler.
|
||||
type HTTPStatusError struct {
|
||||
HTTPStatus int
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *HTTPStatusError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
|
||||
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
|
||||
func HTTPStatusFromCode(code codes.Code) int {
|
||||
switch code {
|
||||
case codes.OK:
|
||||
return http.StatusOK
|
||||
case codes.Canceled:
|
||||
return http.StatusRequestTimeout
|
||||
case codes.Unknown:
|
||||
return http.StatusInternalServerError
|
||||
case codes.InvalidArgument:
|
||||
return http.StatusBadRequest
|
||||
case codes.DeadlineExceeded:
|
||||
return http.StatusGatewayTimeout
|
||||
case codes.NotFound:
|
||||
return http.StatusNotFound
|
||||
case codes.AlreadyExists:
|
||||
return http.StatusConflict
|
||||
case codes.PermissionDenied:
|
||||
return http.StatusForbidden
|
||||
case codes.Unauthenticated:
|
||||
return http.StatusUnauthorized
|
||||
case codes.ResourceExhausted:
|
||||
return http.StatusTooManyRequests
|
||||
case codes.FailedPrecondition:
|
||||
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
|
||||
return http.StatusBadRequest
|
||||
case codes.Aborted:
|
||||
return http.StatusConflict
|
||||
case codes.OutOfRange:
|
||||
return http.StatusBadRequest
|
||||
case codes.Unimplemented:
|
||||
return http.StatusNotImplemented
|
||||
case codes.Internal:
|
||||
return http.StatusInternalServerError
|
||||
case codes.Unavailable:
|
||||
return http.StatusServiceUnavailable
|
||||
case codes.DataLoss:
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
|
||||
grpclog.Infof("Unknown gRPC error code: %v", code)
|
||||
return http.StatusInternalServerError
|
||||
}
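For instance, the NotFound and PermissionDenied mappings above are what a gateway sees when translating a gRPC status into an HTTP reply; a brief sketch using the status package already imported by this file:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.NotFound, "no such record")
	st := status.Convert(err)

	fmt.Println(runtime.HTTPStatusFromCode(st.Code()))              // 404
	fmt.Println(runtime.HTTPStatusFromCode(codes.PermissionDenied)) // 403
}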
|
||||
|
||||
// HTTPError uses the mux-configured error handler.
|
||||
func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
|
||||
mux.errorHandler(ctx, mux, marshaler, w, r, err)
|
||||
}
|
||||
|
||||
// DefaultHTTPErrorHandler is the default error handler.
|
||||
// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
|
||||
// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
|
||||
// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler
|
||||
// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode
|
||||
// are insufficient for.
|
||||
// Otherwise, it replies with http.StatusInternalServerError.
|
||||
//
|
||||
// The response body written by this function is a Status message marshaled by the Marshaler.
|
||||
func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
|
||||
// return Internal when Marshal failed
|
||||
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
|
||||
|
||||
var customStatus *HTTPStatusError
|
||||
if errors.As(err, &customStatus) {
|
||||
err = customStatus.Err
|
||||
}
|
||||
|
||||
s := status.Convert(err)
|
||||
pb := s.Proto()
|
||||
|
||||
w.Header().Del("Trailer")
|
||||
w.Header().Del("Transfer-Encoding")
|
||||
|
||||
contentType := marshaler.ContentType(pb)
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
|
||||
if s.Code() == codes.Unauthenticated {
|
||||
w.Header().Set("WWW-Authenticate", s.Message())
|
||||
}
|
||||
|
||||
buf, merr := marshaler.Marshal(pb)
|
||||
if merr != nil {
|
||||
grpclog.Infof("Failed to marshal error message %q: %v", s, merr)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
if _, err := io.WriteString(w, fallback); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
}
|
||||
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
|
||||
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||
// Unless the request includes a TE header field indicating "trailers"
|
||||
// is acceptable, as described in Section 4.3, a server SHOULD NOT
|
||||
// generate trailer fields that it believes are necessary for the user
|
||||
// agent to receive.
|
||||
doForwardTrailers := requestAcceptsTrailers(r)
|
||||
|
||||
if doForwardTrailers {
|
||||
handleForwardResponseTrailerHeader(w, md)
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
}
|
||||
|
||||
st := HTTPStatusFromCode(s.Code())
|
||||
if customStatus != nil {
|
||||
st = customStatus.HTTPStatus
|
||||
}
|
||||
|
||||
w.WriteHeader(st)
|
||||
if _, err := w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
if doForwardTrailers {
|
||||
handleForwardResponseTrailer(w, md)
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
|
||||
return status.Convert(err)
|
||||
}
|
||||
|
||||
// DefaultRoutingErrorHandler is our default handler for routing errors.
|
||||
// By default, HTTP error codes are mapped to the following gRPC error codes:
|
||||
// NotFound -> grpc.NotFound
|
||||
// StatusBadRequest -> grpc.InvalidArgument
|
||||
// MethodNotAllowed -> grpc.Unimplemented
|
||||
// Other -> grpc.Internal, as the method is not expected to be called for anything else
|
||||
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
|
||||
sterr := status.Error(codes.Internal, "Unexpected routing error")
|
||||
switch httpStatus {
|
||||
case http.StatusBadRequest:
|
||||
sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
|
||||
case http.StatusMethodNotAllowed:
|
||||
sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
|
||||
case http.StatusNotFound:
|
||||
sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
|
||||
}
|
||||
mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
|
||||
}
|
165 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go generated vendored Normal file
@ -0,0 +1,165 @@
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"google.golang.org/genproto/protobuf/field_mask"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
|
||||
fd := fields.ByName(protoreflect.Name(name))
|
||||
if fd != nil {
|
||||
return fd
|
||||
}
|
||||
|
||||
return fields.ByJSONName(name)
|
||||
}
|
||||
|
||||
// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
|
||||
func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
|
||||
fm := &field_mask.FieldMask{}
|
||||
var root interface{}
|
||||
|
||||
if err := json.NewDecoder(r).Decode(&root); err != nil {
|
||||
if err == io.EOF {
|
||||
return fm, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
|
||||
for len(queue) > 0 {
|
||||
// dequeue an item
|
||||
item := queue[0]
|
||||
queue = queue[1:]
|
||||
|
||||
m, ok := item.node.(map[string]interface{})
|
||||
switch {
|
||||
case ok:
|
||||
// if the item is an object, then enqueue all of its children
|
||||
for k, v := range m {
|
||||
if item.msg == nil {
|
||||
return nil, fmt.Errorf("JSON structure did not match request type")
|
||||
}
|
||||
|
||||
fd := getFieldByName(item.msg.Descriptor().Fields(), k)
|
||||
if fd == nil {
|
||||
return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
|
||||
}
|
||||
|
||||
if isDynamicProtoMessage(fd.Message()) {
|
||||
for _, p := range buildPathsBlindly(k, v) {
|
||||
newPath := p
|
||||
if item.path != "" {
|
||||
newPath = item.path + "." + newPath
|
||||
}
|
||||
queue = append(queue, fieldMaskPathItem{path: newPath})
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if isProtobufAnyMessage(fd.Message()) {
|
||||
_, hasTypeField := v.(map[string]interface{})["@type"]
|
||||
if hasTypeField {
|
||||
queue = append(queue, fieldMaskPathItem{path: k})
|
||||
continue
|
||||
} else {
|
||||
return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
child := fieldMaskPathItem{
|
||||
node: v,
|
||||
}
|
||||
if item.path == "" {
|
||||
child.path = string(fd.FullName().Name())
|
||||
} else {
|
||||
child.path = item.path + "." + string(fd.FullName().Name())
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList(), fd.IsMap():
|
||||
// As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
|
||||
// Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
|
||||
fm.Paths = append(fm.Paths, child.path)
|
||||
case fd.Message() != nil:
|
||||
child.msg = item.msg.Get(fd).Message()
|
||||
fallthrough
|
||||
default:
|
||||
queue = append(queue, child)
|
||||
}
|
||||
}
|
||||
case len(item.path) > 0:
|
||||
// otherwise, it's a leaf node so print its path
|
||||
fm.Paths = append(fm.Paths, item.path)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort for deterministic output in the presence
|
||||
// of repeated fields.
|
||||
sort.Strings(fm.Paths)
|
||||
|
||||
return fm, nil
|
||||
}
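A small sketch of the field-mask derivation above, using a well-known wrapper message so the example stays self-contained: a JSON body of {"value": 42} yields the single path "value". The choice of wrapperspb.Int32Value is illustrative, not something this package requires.

package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	body := strings.NewReader(`{"value": 42}`)

	fm, err := runtime.FieldMaskFromRequestBody(body, &wrapperspb.Int32Value{})
	if err != nil {
		panic(err)
	}
	fmt.Println(fm.GetPaths()) // [value]
}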
|
||||
|
||||
func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
|
||||
return md != nil && (md.FullName() == "google.protobuf.Any")
|
||||
}
|
||||
|
||||
func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
|
||||
return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
|
||||
}
|
||||
|
||||
// buildPathsBlindly does not attempt to match proto field names to the
|
||||
// json value keys. Instead it relies completely on the structure of
|
||||
// the unmarshalled json contained within 'in'.
|
||||
// Returns a slice containing all subpaths with the root at the
|
||||
// passed in name and json value.
|
||||
func buildPathsBlindly(name string, in interface{}) []string {
|
||||
m, ok := in.(map[string]interface{})
|
||||
if !ok {
|
||||
return []string{name}
|
||||
}
|
||||
|
||||
var paths []string
|
||||
queue := []fieldMaskPathItem{{path: name, node: m}}
|
||||
for len(queue) > 0 {
|
||||
cur := queue[0]
|
||||
queue = queue[1:]
|
||||
|
||||
m, ok := cur.node.(map[string]interface{})
|
||||
if !ok {
|
||||
// This should never happen since we should always check that we only add
|
||||
// nodes of type map[string]interface{} to the queue.
|
||||
continue
|
||||
}
|
||||
for k, v := range m {
|
||||
if mi, ok := v.(map[string]interface{}); ok {
|
||||
queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
|
||||
} else {
|
||||
// This is not a struct, so there are no more levels to descend.
|
||||
curPath := cur.path + "." + k
|
||||
paths = append(paths, curPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
return paths
|
||||
}
|
||||
|
||||
// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
|
||||
type fieldMaskPathItem struct {
|
||||
// the list of prior fields leading up to node connected by dots
|
||||
path string
|
||||
|
||||
// a generic decoded json object; the current item to inspect for further path extraction
|
||||
node interface{}
|
||||
|
||||
// parent message
|
||||
msg protoreflect.Message
|
||||
}
|
223 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go generated vendored Normal file
@ -0,0 +1,223 @@
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/genproto/googleapis/api/httpbody"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// ForwardResponseStream forwards the stream from gRPC server to REST client.
|
||||
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||
f, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
grpclog.Infof("Flush not supported in %T", w)
|
||||
http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
http.Error(w, "unexpected error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
|
||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
var delimiter []byte
|
||||
if d, ok := marshaler.(Delimited); ok {
|
||||
delimiter = d.Delimiter()
|
||||
} else {
|
||||
delimiter = []byte("\n")
|
||||
}
|
||||
|
||||
var wroteHeader bool
|
||||
for {
|
||||
resp, err := recv()
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
return
|
||||
}
|
||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
return
|
||||
}
|
||||
|
||||
if !wroteHeader {
|
||||
w.Header().Set("Content-Type", marshaler.ContentType(resp))
|
||||
}
|
||||
|
||||
var buf []byte
|
||||
httpBody, isHTTPBody := resp.(*httpbody.HttpBody)
|
||||
switch {
|
||||
case resp == nil:
|
||||
buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
|
||||
case isHTTPBody:
|
||||
buf = httpBody.GetData()
|
||||
default:
|
||||
result := map[string]interface{}{"result": resp}
|
||||
if rb, ok := resp.(responseBody); ok {
|
||||
result["result"] = rb.XXX_ResponseBody()
|
||||
}
|
||||
|
||||
buf, err = marshaler.Marshal(result)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to marshal response chunk: %v", err)
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
return
|
||||
}
|
||||
if _, err = w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to send response chunk: %v", err)
|
||||
return
|
||||
}
|
||||
wroteHeader = true
|
||||
if _, err = w.Write(delimiter); err != nil {
|
||||
grpclog.Infof("Failed to send delimiter chunk: %v", err)
|
||||
return
|
||||
}
|
||||
f.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
|
||||
for k, vs := range md.HeaderMD {
|
||||
if h, ok := mux.outgoingHeaderMatcher(k); ok {
|
||||
for _, v := range vs {
|
||||
w.Header().Add(h, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
|
||||
for k := range md.TrailerMD {
|
||||
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
|
||||
w.Header().Add("Trailer", tKey)
|
||||
}
|
||||
}
|
||||
|
||||
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
|
||||
for k, vs := range md.TrailerMD {
|
||||
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
|
||||
for _, v := range vs {
|
||||
w.Header().Add(tKey, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// responseBody is the interface containing the method for getting the field to marshal to the response body;
|
||||
// this method is generated for the response struct from the value of `response_body` in the `google.api.HttpRule`.
|
||||
type responseBody interface {
|
||||
XXX_ResponseBody() interface{}
|
||||
}
|
||||
|
||||
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
||||
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
}
|
||||
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
|
||||
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||
// Unless the request includes a TE header field indicating "trailers"
|
||||
// is acceptable, as described in Section 4.3, a server SHOULD NOT
|
||||
// generate trailer fields that it believes are necessary for the user
|
||||
// agent to receive.
|
||||
doForwardTrailers := requestAcceptsTrailers(req)
|
||||
|
||||
if doForwardTrailers {
|
||||
handleForwardResponseTrailerHeader(w, md)
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
}
|
||||
|
||||
handleForwardResponseTrailerHeader(w, md)
|
||||
|
||||
contentType := marshaler.ContentType(resp)
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
|
||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
var buf []byte
|
||||
var err error
|
||||
if rb, ok := resp.(responseBody); ok {
|
||||
buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
|
||||
} else {
|
||||
buf, err = marshaler.Marshal(resp)
|
||||
}
|
||||
if err != nil {
|
||||
grpclog.Infof("Marshal error: %v", err)
|
||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
if doForwardTrailers {
|
||||
handleForwardResponseTrailer(w, md)
|
||||
}
|
||||
}
|
||||
|
||||
func requestAcceptsTrailers(req *http.Request) bool {
|
||||
te := req.Header.Get("TE")
|
||||
return strings.Contains(strings.ToLower(te), "trailers")
|
||||
}
|
||||
|
||||
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
|
||||
if len(opts) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(ctx, w, resp); err != nil {
|
||||
grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
|
||||
st := mux.streamErrorHandler(ctx, err)
|
||||
msg := errorChunk(st)
|
||||
if !wroteHeader {
|
||||
w.Header().Set("Content-Type", marshaler.ContentType(msg))
|
||||
w.WriteHeader(HTTPStatusFromCode(st.Code()))
|
||||
}
|
||||
buf, merr := marshaler.Marshal(msg)
|
||||
if merr != nil {
|
||||
grpclog.Infof("Failed to marshal an error: %v", merr)
|
||||
return
|
||||
}
|
||||
if _, werr := w.Write(buf); werr != nil {
|
||||
grpclog.Infof("Failed to notify error to client: %v", werr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func errorChunk(st *status.Status) map[string]proto.Message {
|
||||
return map[string]proto.Message{"error": st.Proto()}
|
||||
}
|
32 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go generated vendored Normal file
@ -0,0 +1,32 @@
package runtime
|
||||
|
||||
import (
|
||||
"google.golang.org/genproto/googleapis/api/httpbody"
|
||||
)
|
||||
|
||||
// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
|
||||
// google.api.HttpBody message as the full response body if it is
|
||||
// the actual message used as the response. If not, then this will
|
||||
// simply fallback to the Marshaler specified as its default Marshaler.
|
||||
type HTTPBodyMarshaler struct {
|
||||
Marshaler
|
||||
}
|
||||
|
||||
// ContentType returns its specified content type in case v is a
|
||||
// google.api.HttpBody message, otherwise it will fall back to the default Marshaler's
|
||||
// content type.
|
||||
func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
|
||||
if httpBody, ok := v.(*httpbody.HttpBody); ok {
|
||||
return httpBody.GetContentType()
|
||||
}
|
||||
return h.Marshaler.ContentType(v)
|
||||
}
|
||||
|
||||
// Marshal marshals "v" by returning the body bytes if v is a
|
||||
// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
|
||||
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
|
||||
if httpBody, ok := v.(*httpbody.HttpBody); ok {
|
||||
return httpBody.Data, nil
|
||||
}
|
||||
return h.Marshaler.Marshal(v)
|
||||
}
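When the response message is a google.api.HttpBody, the wrapper above passes the raw bytes and declared content type straight through; anything else falls back to the embedded Marshaler. An illustrative sketch (the HTML payload is made up):

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/genproto/googleapis/api/httpbody"
)

func main() {
	m := &runtime.HTTPBodyMarshaler{Marshaler: &runtime.JSONPb{}}

	// HttpBody messages are passed through verbatim.
	body := &httpbody.HttpBody{ContentType: "text/html", Data: []byte("<h1>hi</h1>")}
	buf, _ := m.Marshal(body)
	fmt.Println(m.ContentType(body), string(buf)) // text/html <h1>hi</h1>

	// Anything else falls back to the embedded JSONPb marshaler.
	buf, _ = m.Marshal(map[string]string{"ok": "yes"})
	fmt.Println(m.ContentType(nil), string(buf)) // application/json {"ok":"yes"}
}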
|
45 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go generated vendored Normal file
@ -0,0 +1,45 @@
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
|
||||
// with the standard "encoding/json" package of Golang.
|
||||
// Although it is generally faster for simple proto messages than JSONPb,
|
||||
// it does not support advanced features of protobuf, e.g. map, oneof, ....
|
||||
//
|
||||
// The NewEncoder and NewDecoder methods return *json.Encoder and
|
||||
// *json.Decoder respectively.
|
||||
type JSONBuiltin struct{}
|
||||
|
||||
// ContentType always returns "application/json".
|
||||
func (*JSONBuiltin) ContentType(_ interface{}) string {
|
||||
return "application/json"
|
||||
}
|
||||
|
||||
// Marshal marshals "v" into JSON
|
||||
func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals JSON data into "v".
|
||||
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
||||
func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
|
||||
return json.NewDecoder(r)
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
||||
func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
|
||||
return json.NewEncoder(w)
|
||||
}
|
||||
|
||||
// Delimiter for newline encoded JSON streams.
|
||||
func (j *JSONBuiltin) Delimiter() []byte {
|
||||
return []byte("\n")
|
||||
}
|
344 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go generated vendored Normal file
@ -0,0 +1,344 @@
package runtime
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
|
||||
// with the "google.golang.org/protobuf/encoding/protojson" marshaler.
|
||||
// It supports the full functionality of protobuf unlike JSONBuiltin.
|
||||
//
|
||||
// The NewDecoder method returns a DecoderWrapper, so the underlying
|
||||
// *json.Decoder methods can be used.
|
||||
type JSONPb struct {
|
||||
protojson.MarshalOptions
|
||||
protojson.UnmarshalOptions
|
||||
}
|
||||
|
||||
// ContentType always returns "application/json".
|
||||
func (*JSONPb) ContentType(_ interface{}) string {
|
||||
return "application/json"
|
||||
}
|
||||
|
||||
// Marshal marshals "v" into JSON.
|
||||
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
|
||||
if _, ok := v.(proto.Message); !ok {
|
||||
return j.marshalNonProtoField(v)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := j.marshalTo(&buf, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
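A brief sketch of both marshaling paths above: a real proto.Message goes through protojson (honouring the embedded MarshalOptions), while a plain Go map takes the marshalNonProtoField route. The durationpb value and the map are illustrative only.

package main

import (
	"fmt"
	"time"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	j := &runtime.JSONPb{
		MarshalOptions: protojson.MarshalOptions{EmitUnpopulated: true},
	}

	// proto.Message path: rendered by protojson, e.g. "90s".
	buf, _ := j.Marshal(durationpb.New(90 * time.Second))
	fmt.Println(string(buf))

	// Non-proto path: handled by marshalNonProtoField / encoding/json.
	buf, _ = j.Marshal(map[string]interface{}{"healthy": true})
	fmt.Println(string(buf))
}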
|
||||
|
||||
func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
|
||||
p, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
buf, err := j.marshalNonProtoField(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(buf)
|
||||
return err
|
||||
}
|
||||
b, err := j.MarshalOptions.Marshal(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = w.Write(b)
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
// protoMessageType is stored to prevent constant lookup of the same type at runtime.
|
||||
protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
||||
)
|
||||
|
||||
// marshalNonProtoField marshals a non-message field of a protobuf message.
|
||||
// This function does not correctly marshal arbitrary data structures into JSON,
|
||||
// it is only capable of marshaling non-message field values of protobuf,
|
||||
// i.e. primitive types, enums; pointers to primitives or enums; maps from
|
||||
// integer/string types to primitives/enums/pointers to messages.
|
||||
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
||||
if v == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
rv := reflect.ValueOf(v)
|
||||
for rv.Kind() == reflect.Ptr {
|
||||
if rv.IsNil() {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
rv = rv.Elem()
|
||||
}
|
||||
|
||||
if rv.Kind() == reflect.Slice {
|
||||
if rv.IsNil() {
|
||||
if j.EmitUnpopulated {
|
||||
return []byte("[]"), nil
|
||||
}
|
||||
return []byte("null"), nil
|
||||
}
|
||||
|
||||
if rv.Type().Elem().Implements(protoMessageType) {
|
||||
var buf bytes.Buffer
|
||||
err := buf.WriteByte('[')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
if i != 0 {
|
||||
err = buf.WriteByte(',')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = buf.WriteByte(']')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
if rv.Type().Elem().Implements(typeProtoEnum) {
|
||||
var buf bytes.Buffer
|
||||
err := buf.WriteByte('[')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
if i != 0 {
|
||||
err = buf.WriteByte(',')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if j.UseEnumNumbers {
|
||||
_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
|
||||
} else {
|
||||
_, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = buf.WriteByte(']')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
}
|
||||
|
||||
if rv.Kind() == reflect.Map {
|
||||
m := make(map[string]*json.RawMessage)
|
||||
for _, k := range rv.MapKeys() {
|
||||
buf, err := j.Marshal(rv.MapIndex(k).Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
|
||||
}
|
||||
if j.Indent != "" {
|
||||
return json.MarshalIndent(m, "", j.Indent)
|
||||
}
|
||||
return json.Marshal(m)
|
||||
}
|
||||
if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
|
||||
return json.Marshal(enum.String())
|
||||
}
|
||||
return json.Marshal(rv.Interface())
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals JSON "data" into "v"
|
||||
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
|
||||
return unmarshalJSONPb(data, j.UnmarshalOptions, v)
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
||||
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
|
||||
d := json.NewDecoder(r)
|
||||
return DecoderWrapper{
|
||||
Decoder: d,
|
||||
UnmarshalOptions: j.UnmarshalOptions,
|
||||
}
|
||||
}
|
||||
|
||||
// DecoderWrapper is a wrapper around a *json.Decoder that adds
|
||||
// support for protos to the Decode method.
|
||||
type DecoderWrapper struct {
|
||||
*json.Decoder
|
||||
protojson.UnmarshalOptions
|
||||
}
|
||||
|
||||
// Decode wraps the embedded decoder's Decode method to support
|
||||
// protos using a jsonpb.Unmarshaler.
|
||||
func (d DecoderWrapper) Decode(v interface{}) error {
|
||||
return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
||||
func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
|
||||
return EncoderFunc(func(v interface{}) error {
|
||||
if err := j.marshalTo(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
// mimic json.Encoder by adding a newline (makes output
|
||||
// easier to read when it contains multiple encoded items)
|
||||
_, err := w.Write(j.Delimiter())
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
|
||||
d := json.NewDecoder(bytes.NewReader(data))
|
||||
return decodeJSONPb(d, unmarshaler, v)
|
||||
}
|
||||
|
||||
func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
|
||||
p, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
return decodeNonProtoField(d, unmarshaler, v)
|
||||
}
|
||||
|
||||
// Decode into bytes for marshalling
|
||||
var b json.RawMessage
|
||||
err := d.Decode(&b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return unmarshaler.Unmarshal([]byte(b), p)
|
||||
}
|
||||
|
||||
func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("%T is not a pointer", v)
|
||||
}
|
||||
for rv.Kind() == reflect.Ptr {
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.New(rv.Type().Elem()))
|
||||
}
|
||||
if rv.Type().ConvertibleTo(typeProtoMessage) {
|
||||
// Decode into bytes for marshalling
|
||||
var b json.RawMessage
|
||||
err := d.Decode(&b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
|
||||
}
|
||||
rv = rv.Elem()
|
||||
}
|
||||
if rv.Kind() == reflect.Map {
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
conv, ok := convFromType[rv.Type().Key().Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
|
||||
}
|
||||
|
||||
m := make(map[string]*json.RawMessage)
|
||||
if err := d.Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range m {
|
||||
result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
|
||||
if err := result[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
bk := result[0]
|
||||
bv := reflect.New(rv.Type().Elem())
|
||||
if v == nil {
|
||||
null := json.RawMessage("null")
|
||||
v = &null
|
||||
}
|
||||
if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
rv.SetMapIndex(bk, bv.Elem())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if rv.Kind() == reflect.Slice {
|
||||
var sl []json.RawMessage
|
||||
if err := d.Decode(&sl); err != nil {
|
||||
return err
|
||||
}
|
||||
if sl != nil {
|
||||
rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
|
||||
}
|
||||
for _, item := range sl {
|
||||
bv := reflect.New(rv.Type().Elem())
|
||||
if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
rv.Set(reflect.Append(rv, bv.Elem()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if _, ok := rv.Interface().(protoEnum); ok {
|
||||
var repr interface{}
|
||||
if err := d.Decode(&repr); err != nil {
|
||||
return err
|
||||
}
|
||||
switch v := repr.(type) {
|
||||
case string:
|
||||
// TODO(yugui) Should use proto.StructProperties?
|
||||
return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
|
||||
case float64:
|
||||
rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
|
||||
}
|
||||
}
|
||||
return d.Decode(v)
|
||||
}
|
||||
|
||||
type protoEnum interface {
|
||||
fmt.Stringer
|
||||
EnumDescriptor() ([]byte, []int)
|
||||
}
|
||||
|
||||
var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
|
||||
|
||||
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
||||
|
||||
// Delimiter for newline encoded JSON streams.
|
||||
func (j *JSONPb) Delimiter() []byte {
|
||||
return []byte("\n")
|
||||
}
|
||||
|
||||
var (
|
||||
convFromType = map[reflect.Kind]reflect.Value{
|
||||
reflect.String: reflect.ValueOf(String),
|
||||
reflect.Bool: reflect.ValueOf(Bool),
|
||||
reflect.Float64: reflect.ValueOf(Float64),
|
||||
reflect.Float32: reflect.ValueOf(Float32),
|
||||
reflect.Int64: reflect.ValueOf(Int64),
|
||||
reflect.Int32: reflect.ValueOf(Int32),
|
||||
reflect.Uint64: reflect.ValueOf(Uint64),
|
||||
reflect.Uint32: reflect.ValueOf(Uint32),
|
||||
reflect.Slice: reflect.ValueOf(Bytes),
|
||||
}
|
||||
)
|
63 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go generated vendored Normal file
@ -0,0 +1,63 @@
package runtime
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes.
|
||||
type ProtoMarshaller struct{}
|
||||
|
||||
// ContentType always returns "application/octet-stream".
|
||||
func (*ProtoMarshaller) ContentType(_ interface{}) string {
|
||||
return "application/octet-stream"
|
||||
}
|
||||
|
||||
// Marshal marshals "value" into Proto
|
||||
func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
|
||||
message, ok := value.(proto.Message)
|
||||
if !ok {
|
||||
return nil, errors.New("unable to marshal non proto field")
|
||||
}
|
||||
return proto.Marshal(message)
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals proto "data" into "value"
|
||||
func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
|
||||
message, ok := value.(proto.Message)
|
||||
if !ok {
|
||||
return errors.New("unable to unmarshal non proto field")
|
||||
}
|
||||
return proto.Unmarshal(data, message)
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder which reads proto stream from "reader".
|
||||
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
|
||||
return DecoderFunc(func(value interface{}) error {
|
||||
buffer, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return marshaller.Unmarshal(buffer, value)
|
||||
})
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder which writes proto stream into "writer".
|
||||
func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
|
||||
return EncoderFunc(func(value interface{}) error {
|
||||
buffer, err := marshaller.Marshal(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = writer.Write(buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
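A round-trip sketch using the binary marshaller above with a well-known wrapper type (chosen only to keep the example self-contained):

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := &runtime.ProtoMarshaller{}

	// Marshal a proto message to its wire format...
	buf, err := m.Marshal(wrapperspb.String("hello"))
	if err != nil {
		panic(err)
	}

	// ...and unmarshal it back.
	var out wrapperspb.StringValue
	if err := m.Unmarshal(buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetValue(), m.ContentType(nil)) // hello application/octet-stream
}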
|
50 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go generated vendored Normal file
@ -0,0 +1,50 @@
package runtime
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
|
||||
type Marshaler interface {
|
||||
// Marshal marshals "v" into byte sequence.
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
// Unmarshal unmarshals "data" into "v".
|
||||
// "v" must be a pointer value.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
// NewDecoder returns a Decoder which reads byte sequence from "r".
|
||||
NewDecoder(r io.Reader) Decoder
|
||||
// NewEncoder returns an Encoder which writes byte sequence into "w".
|
||||
NewEncoder(w io.Writer) Encoder
|
||||
// ContentType returns the Content-Type which this marshaler is responsible for.
|
||||
// The parameter describes the type which is being marshalled, which can sometimes
|
||||
// affect the content type returned.
|
||||
ContentType(v interface{}) string
|
||||
}
|
||||
|
||||
// Decoder decodes a byte sequence
|
||||
type Decoder interface {
|
||||
Decode(v interface{}) error
|
||||
}
|
||||
|
||||
// Encoder encodes gRPC payloads / fields into byte sequence.
|
||||
type Encoder interface {
|
||||
Encode(v interface{}) error
|
||||
}
|
||||
|
||||
// DecoderFunc adapts a decoder function into Decoder.
|
||||
type DecoderFunc func(v interface{}) error
|
||||
|
||||
// Decode delegates invocations to the underlying function itself.
|
||||
func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
|
||||
|
||||
// EncoderFunc adapts an encoder function into Encoder
|
||||
type EncoderFunc func(v interface{}) error
|
||||
|
||||
// Encode delegates invocations to the underlying function itself.
|
||||
func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
|
||||
|
||||
// Delimited defines the streaming delimiter.
|
||||
type Delimited interface {
|
||||
// Delimiter returns the record separator for the stream.
|
||||
Delimiter() []byte
|
||||
}
|
109 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go generated vendored Normal file
@ -0,0 +1,109 @@
package runtime
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"mime"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
)
|
||||
|
||||
// MIMEWildcard is the fallback MIME type used for requests which do not match
|
||||
// a registered MIME type.
|
||||
const MIMEWildcard = "*"
|
||||
|
||||
var (
|
||||
acceptHeader = http.CanonicalHeaderKey("Accept")
|
||||
contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
|
||||
|
||||
defaultMarshaler = &HTTPBodyMarshaler{
|
||||
Marshaler: &JSONPb{
|
||||
MarshalOptions: protojson.MarshalOptions{
|
||||
EmitUnpopulated: true,
|
||||
},
|
||||
UnmarshalOptions: protojson.UnmarshalOptions{
|
||||
DiscardUnknown: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// MarshalerForRequest returns the inbound/outbound marshalers for this request.
|
||||
// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
|
||||
// If it isn't set (or the request Content-Type is empty), checks for "*".
|
||||
// If there are multiple Content-Type headers set, choose the first one that it can
|
||||
// exactly match in the registry.
|
||||
// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
|
||||
func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
|
||||
for _, acceptVal := range r.Header[acceptHeader] {
|
||||
if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
|
||||
outbound = m
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, contentTypeVal := range r.Header[contentTypeHeader] {
|
||||
contentType, _, err := mime.ParseMediaType(contentTypeVal)
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
|
||||
continue
|
||||
}
|
||||
if m, ok := mux.marshalers.mimeMap[contentType]; ok {
|
||||
inbound = m
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if inbound == nil {
|
||||
inbound = mux.marshalers.mimeMap[MIMEWildcard]
|
||||
}
|
||||
if outbound == nil {
|
||||
outbound = inbound
|
||||
}
|
||||
|
||||
return inbound, outbound
|
||||
}
|
||||
|
||||
// marshalerRegistry is a mapping from MIME types to Marshalers.
|
||||
type marshalerRegistry struct {
|
||||
mimeMap map[string]Marshaler
|
||||
}
|
||||
|
||||
// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
|
||||
// MIME type).
|
||||
func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
|
||||
if len(mime) == 0 {
|
||||
return errors.New("empty MIME type")
|
||||
}
|
||||
|
||||
m.mimeMap[mime] = marshaler
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeMarshalerMIMERegistry returns a new registry of marshalers.
|
||||
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
|
||||
//
|
||||
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
|
||||
// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
|
||||
// with a "application/json" Content-Type.
|
||||
// "*" can be used to match any Content-Type.
|
||||
// This can be attached to a ServerMux with the marshaler option.
|
||||
func makeMarshalerMIMERegistry() marshalerRegistry {
|
||||
return marshalerRegistry{
|
||||
mimeMap: map[string]Marshaler{
|
||||
MIMEWildcard: defaultMarshaler,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
|
||||
// Marshalers to a MIME type in mux.
|
||||
func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
|
||||
return func(mux *ServeMux) {
|
||||
if err := mux.marshalers.add(mime, marshaler); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
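For example, a gateway that should speak binary protobuf when a client sends Content-Type or Accept of application/x-protobuf can register the ProtoMarshaller alongside the wildcard JSON default. The MIME string is a conventional choice for illustration, not something the package mandates:

package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	mux := runtime.NewServeMux(
		// "*" keeps the default JSON marshaler; this entry adds binary
		// protobuf for clients that ask for it explicitly.
		runtime.WithMarshalerOption("application/x-protobuf", &runtime.ProtoMarshaller{}),
	)
	_ = mux // register generated gateway handlers on mux as usual
}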
|
356 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go generated vendored Normal file
@ -0,0 +1,356 @@
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strings"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
|
||||
type UnescapingMode int
|
||||
|
||||
const (
|
||||
// UnescapingModeLegacy is the default V2 behavior, which escapes the entire
|
||||
// path string before doing any routing.
|
||||
UnescapingModeLegacy UnescapingMode = iota
|
||||
|
||||
// UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
|
||||
// reserved characters.
|
||||
UnescapingModeAllExceptReserved
|
||||
|
||||
// UnescapingModeAllExceptSlash unescapes URL path parameters except path
|
||||
// separators, which will be left as "%2F".
|
||||
UnescapingModeAllExceptSlash
|
||||
|
||||
// UnescapingModeAllCharacters fully decodes URL path parameters.
|
||||
UnescapingModeAllCharacters
|
||||
|
||||
// UnescapingModeDefault is the default escaping type.
|
||||
// TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
|
||||
// reference implementation
|
||||
UnescapingModeDefault = UnescapingModeLegacy
|
||||
)
|
||||
|
||||
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
|
||||
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
|
||||
|
||||
// ServeMux is a request multiplexer for grpc-gateway.
|
||||
// It matches http requests to patterns and invokes the corresponding handler.
|
||||
type ServeMux struct {
|
||||
// handlers maps HTTP method to a list of handlers.
|
||||
handlers map[string][]handler
|
||||
forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
|
||||
marshalers marshalerRegistry
|
||||
incomingHeaderMatcher HeaderMatcherFunc
|
||||
outgoingHeaderMatcher HeaderMatcherFunc
|
||||
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
|
||||
errorHandler ErrorHandlerFunc
|
||||
streamErrorHandler StreamErrorHandlerFunc
|
||||
routingErrorHandler RoutingErrorHandlerFunc
|
||||
disablePathLengthFallback bool
|
||||
unescapingMode UnescapingMode
|
||||
}
|
||||
|
||||
// ServeMuxOption is an option that can be given to a ServeMux on construction.
|
||||
type ServeMuxOption func(*ServeMux)
|
||||
|
||||
// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
|
||||
//
|
||||
// forwardResponseOption is an option that will be called on the relevant context.Context,
|
||||
// http.ResponseWriter, and proto.Message before every forwarded response.
|
||||
//
|
||||
// The message may be nil in the case where just a header is being sent.
|
||||
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
|
||||
}
|
||||
}
|
||||
|
||||
// WithUnescapingMode sets the unescaping mode. See the definitions of UnescapingMode
|
||||
// for more information.
|
||||
func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.unescapingMode = mode
|
||||
}
|
||||
}
|
||||
|
||||
// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
|
||||
// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
|
||||
// done with careful consideration.
|
||||
func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
currentQueryParser = queryParameterParser
|
||||
}
|
||||
}
|
||||
|
||||
// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
|
||||
type HeaderMatcherFunc func(string) (string, bool)
|
||||
|
||||
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
|
||||
// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
|
||||
// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
|
||||
func DefaultHeaderMatcher(key string) (string, bool) {
|
||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
||||
if isPermanentHTTPHeader(key) {
|
||||
return MetadataPrefix + key, true
|
||||
} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
|
||||
return key[len(MetadataHeaderPrefix):], true
|
||||
}
|
||||
return "", false
|
||||
}
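A quick sketch of the mapping DefaultHeaderMatcher performs; the exact prefixes come from the package's MetadataPrefix and MetadataHeaderPrefix constants, which are defined elsewhere in this package:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Permanent IANA header: forwarded with the grpcgateway- prefix.
	fmt.Println(runtime.DefaultHeaderMatcher("Authorization")) // grpcgateway-Authorization true

	// Grpc-Metadata-* headers: forwarded with the prefix stripped.
	fmt.Println(runtime.DefaultHeaderMatcher("Grpc-Metadata-Foo")) // Foo true

	// Anything else is dropped.
	fmt.Println(runtime.DefaultHeaderMatcher("X-Custom-Header")) // (empty) false
}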
|
||||
|
||||
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
|
||||
//
|
||||
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
|
||||
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
|
||||
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||
return func(mux *ServeMux) {
|
||||
mux.incomingHeaderMatcher = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
|
||||
//
|
||||
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
|
||||
// passed to http response returned from gateway. To transform the header before passing to response,
|
||||
// matcher should return modified header.
|
||||
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||
return func(mux *ServeMux) {
|
||||
mux.outgoingHeaderMatcher = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
|
||||
//
|
||||
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
|
||||
// is reading token from cookie and adding it in gRPC context.
|
||||
func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
|
||||
}
|
||||
}
|
||||
|
||||
// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
|
||||
//
|
||||
// This can be used to configure a custom error response.
|
||||
func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.errorHandler = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
|
||||
// error handler, which allows for customizing the error trailer for server-streaming
|
||||
// calls.
|
||||
//
|
||||
// For stream errors that occur before any response has been written, the mux's
|
||||
// ErrorHandler will be invoked. However, once data has been written, the errors must
|
||||
// be handled differently: they must be included in the response body. The response body's
|
||||
// final message will include the error details returned by the stream error handler.
|
||||
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.streamErrorHandler = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
|
||||
//
|
||||
// This handler is called for errors which can happen before a gRPC route is selected or executed.
|
||||
// It handles the following HTTP statuses: StatusMethodNotAllowed, StatusNotFound, StatusBadRequest.
|
||||
func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.routingErrorHandler = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithDisablePathLengthFallback returns a ServeMuxOption for disabling path length fallback.
|
||||
func WithDisablePathLengthFallback() ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.disablePathLengthFallback = true
|
||||
}
|
||||
}
|
||||
|
||||
// NewServeMux returns a new ServeMux whose internal mapping is empty.
|
||||
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
|
||||
serveMux := &ServeMux{
|
||||
handlers: make(map[string][]handler),
|
||||
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
|
||||
marshalers: makeMarshalerMIMERegistry(),
|
||||
errorHandler: DefaultHTTPErrorHandler,
|
||||
streamErrorHandler: DefaultStreamErrorHandler,
|
||||
routingErrorHandler: DefaultRoutingErrorHandler,
|
||||
unescapingMode: UnescapingModeDefault,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(serveMux)
|
||||
}
|
||||
|
||||
if serveMux.incomingHeaderMatcher == nil {
|
||||
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
|
||||
}
|
||||
|
||||
if serveMux.outgoingHeaderMatcher == nil {
|
||||
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
|
||||
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
|
||||
}
|
||||
}
|
||||
|
||||
return serveMux
|
||||
}
|
||||
|
||||
// Handle associates "h" to the pair of HTTP method and path pattern.
|
||||
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
|
||||
s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
|
||||
}
|
||||
|
||||
// HandlePath allows users to configure custom path handlers.
|
||||
// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
|
||||
func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
|
||||
compiler, err := httprule.Parse(pathPattern)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing path pattern: %w", err)
|
||||
}
|
||||
tp := compiler.Compile()
|
||||
pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating new pattern: %w", err)
|
||||
}
|
||||
s.Handle(meth, pattern, h)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
|
||||
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
path := r.URL.Path
|
||||
if !strings.HasPrefix(path, "/") {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(v3): remove UnescapingModeLegacy
|
||||
if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
|
||||
path = r.URL.RawPath
|
||||
}
|
||||
|
||||
components := strings.Split(path[1:], "/")
|
||||
|
||||
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
|
||||
r.Method = strings.ToUpper(override)
|
||||
if err := r.ParseForm(); err != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
sterr := status.Error(codes.InvalidArgument, err.Error())
|
||||
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Verb out here is to memoize for the fallback case below
|
||||
var verb string
|
||||
|
||||
for _, h := range s.handlers[r.Method] {
|
||||
// If the pattern has a verb, explicitly look for a suffix in the last
|
||||
// component that matches a colon plus the verb. This allows us to
|
||||
// handle some cases that otherwise can't be correctly handled by the
|
||||
// former LastIndex case, such as when the verb literal itself contains
|
||||
// a colon. This should work for all cases that have run through the
|
||||
// parser because we know what verb we're looking for, however, there
|
||||
// are still some cases that the parser itself cannot disambiguate. See
|
||||
// the comment there if interested.
|
||||
patVerb := h.pat.Verb()
|
||||
l := len(components)
|
||||
lastComponent := components[l-1]
|
||||
var idx int = -1
|
||||
if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
|
||||
idx = len(lastComponent) - len(patVerb) - 1
|
||||
}
|
||||
if idx == 0 {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if idx > 0 {
|
||||
components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:]
|
||||
}
|
||||
|
||||
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
|
||||
if err != nil {
|
||||
var mse MalformedSequenceError
|
||||
if ok := errors.As(err, &mse); ok {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
Err: mse,
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
h.h(w, r, pathParams)
|
||||
return
|
||||
}
|
||||
|
||||
// lookup other methods to handle fallback from GET to POST and
|
||||
// to determine if it is NotImplemented or NotFound.
|
||||
for m, handlers := range s.handlers {
|
||||
if m == r.Method {
|
||||
continue
|
||||
}
|
||||
for _, h := range handlers {
|
||||
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
|
||||
if err != nil {
|
||||
var mse MalformedSequenceError
|
||||
if ok := errors.As(err, &mse); ok {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
Err: mse,
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
|
||||
if s.isPathLengthFallback(r) {
|
||||
if err := r.ParseForm(); err != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
sterr := status.Error(codes.InvalidArgument, err.Error())
|
||||
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||
return
|
||||
}
|
||||
h.h(w, r, pathParams)
|
||||
return
|
||||
}
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
|
||||
}
|
||||
|
||||
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
|
||||
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
|
||||
return s.forwardResponseOptions
|
||||
}
|
||||
|
||||
func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
|
||||
return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
|
||||
}
|
||||
|
||||
type handler struct {
|
||||
pat Pattern
|
||||
h HandlerFunc
|
||||
}
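For context on how the mux options above fit together, here is a minimal, hedged sketch (not part of this diff; the cookie name, health path, and port are illustrative) that builds a ServeMux with a metadata annotator and a custom path handler, then serves it:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/metadata"
)

func main() {
	mux := runtime.NewServeMux(
		// Copy a session cookie into gRPC metadata; "session" is an
		// illustrative name, not something this diff defines.
		runtime.WithMetadata(func(ctx context.Context, r *http.Request) metadata.MD {
			if c, err := r.Cookie("session"); err == nil {
				return metadata.Pairs("session", c.Value)
			}
			return nil
		}),
	)

	// Register a plain HTTP handler alongside any generated gateway routes.
	if err := mux.HandlePath("GET", "/healthz", func(w http.ResponseWriter, r *http.Request, _ map[string]string) {
		w.WriteHeader(http.StatusOK)
	}); err != nil {
		log.Fatal(err)
	}

	// ServeMux implements http.Handler via ServeHTTP above.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```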
|
383 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go generated vendored Normal file
|
@@ -0,0 +1,383 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
|
||||
ErrNotMatch = errors.New("not match to the path pattern")
|
||||
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
|
||||
ErrInvalidPattern = errors.New("invalid pattern")
|
||||
// ErrMalformedSequence indicates that an escape sequence was malformed.
|
||||
ErrMalformedSequence = errors.New("malformed escape sequence")
|
||||
)
|
||||
|
||||
type MalformedSequenceError string
|
||||
|
||||
func (e MalformedSequenceError) Error() string {
|
||||
return "malformed path escape " + strconv.Quote(string(e))
|
||||
}
|
||||
|
||||
type op struct {
|
||||
code utilities.OpCode
|
||||
operand int
|
||||
}
|
||||
|
||||
// Pattern is a template pattern of http request paths defined in
|
||||
// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
|
||||
type Pattern struct {
|
||||
// ops is a list of operations
|
||||
ops []op
|
||||
// pool is a constant pool indexed by the operands or vars.
|
||||
pool []string
|
||||
// vars is a list of variables names to be bound by this pattern
|
||||
vars []string
|
||||
// stacksize is the max depth of the stack
|
||||
stacksize int
|
||||
// tailLen is the length of the fixed-size segments after a deep wildcard
|
||||
tailLen int
|
||||
// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
|
||||
verb string
|
||||
}
|
||||
|
||||
// NewPattern returns a new Pattern from the given definition values.
|
||||
// "ops" is a sequence of op codes. "pool" is a constant pool.
|
||||
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
|
||||
// "version" must be 1 for now.
|
||||
// It returns an error if the given definition is invalid.
|
||||
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
|
||||
if version != 1 {
|
||||
grpclog.Infof("unsupported version: %d", version)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
|
||||
l := len(ops)
|
||||
if l%2 != 0 {
|
||||
grpclog.Infof("odd number of ops codes: %d", l)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
|
||||
var (
|
||||
typedOps []op
|
||||
stack, maxstack int
|
||||
tailLen int
|
||||
pushMSeen bool
|
||||
vars []string
|
||||
)
|
||||
for i := 0; i < l; i += 2 {
|
||||
op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
|
||||
switch op.code {
|
||||
case utilities.OpNop:
|
||||
continue
|
||||
case utilities.OpPush:
|
||||
if pushMSeen {
|
||||
tailLen++
|
||||
}
|
||||
stack++
|
||||
case utilities.OpPushM:
|
||||
if pushMSeen {
|
||||
grpclog.Infof("pushM appears twice")
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
pushMSeen = true
|
||||
stack++
|
||||
case utilities.OpLitPush:
|
||||
if op.operand < 0 || len(pool) <= op.operand {
|
||||
grpclog.Infof("negative literal index: %d", op.operand)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
if pushMSeen {
|
||||
tailLen++
|
||||
}
|
||||
stack++
|
||||
case utilities.OpConcatN:
|
||||
if op.operand <= 0 {
|
||||
grpclog.Infof("negative concat size: %d", op.operand)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
stack -= op.operand
|
||||
if stack < 0 {
|
||||
grpclog.Info("stack underflow")
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
stack++
|
||||
case utilities.OpCapture:
|
||||
if op.operand < 0 || len(pool) <= op.operand {
|
||||
grpclog.Infof("variable name index out of bound: %d", op.operand)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
v := pool[op.operand]
|
||||
op.operand = len(vars)
|
||||
vars = append(vars, v)
|
||||
stack--
|
||||
if stack < 0 {
|
||||
grpclog.Infof("stack underflow")
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
default:
|
||||
grpclog.Infof("invalid opcode: %d", op.code)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
|
||||
if maxstack < stack {
|
||||
maxstack = stack
|
||||
}
|
||||
typedOps = append(typedOps, op)
|
||||
}
|
||||
return Pattern{
|
||||
ops: typedOps,
|
||||
pool: pool,
|
||||
vars: vars,
|
||||
stacksize: maxstack,
|
||||
tailLen: tailLen,
|
||||
verb: verb,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
|
||||
func MustPattern(p Pattern, err error) Pattern {
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Pattern initialization failed: %v", err)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// MatchAndEscape examines components to determine if they match to a Pattern.
|
||||
// MatchAndEscape will return an error if no Patterns matched or if a pattern
|
||||
// matched but contained malformed escape sequences. If successful, the function
|
||||
// returns a mapping from field paths to their captured values.
|
||||
func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
|
||||
if p.verb != verb {
|
||||
if p.verb != "" {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
if len(components) == 0 {
|
||||
components = []string{":" + verb}
|
||||
} else {
|
||||
components = append([]string{}, components...)
|
||||
components[len(components)-1] += ":" + verb
|
||||
}
|
||||
}
|
||||
|
||||
var pos int
|
||||
stack := make([]string, 0, p.stacksize)
|
||||
captured := make([]string, len(p.vars))
|
||||
l := len(components)
|
||||
for _, op := range p.ops {
|
||||
var err error
|
||||
|
||||
switch op.code {
|
||||
case utilities.OpNop:
|
||||
continue
|
||||
case utilities.OpPush, utilities.OpLitPush:
|
||||
if pos >= l {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
c := components[pos]
|
||||
if op.code == utilities.OpLitPush {
|
||||
if lit := p.pool[op.operand]; c != lit {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
} else if op.code == utilities.OpPush {
|
||||
if c, err = unescape(c, unescapingMode, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
stack = append(stack, c)
|
||||
pos++
|
||||
case utilities.OpPushM:
|
||||
end := len(components)
|
||||
if end < pos+p.tailLen {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
end -= p.tailLen
|
||||
c := strings.Join(components[pos:end], "/")
|
||||
if c, err = unescape(c, unescapingMode, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stack = append(stack, c)
|
||||
pos = end
|
||||
case utilities.OpConcatN:
|
||||
n := op.operand
|
||||
l := len(stack) - n
|
||||
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||
case utilities.OpCapture:
|
||||
n := len(stack) - 1
|
||||
captured[op.operand] = stack[n]
|
||||
stack = stack[:n]
|
||||
}
|
||||
}
|
||||
if pos < l {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
bindings := make(map[string]string)
|
||||
for i, val := range captured {
|
||||
bindings[p.vars[i]] = val
|
||||
}
|
||||
return bindings, nil
|
||||
}
|
||||
|
||||
// MatchAndEscape examines components to determine if they match to a Pattern.
|
||||
// It will never perform per-component unescaping (see: UnescapingModeLegacy).
|
||||
// MatchAndEscape will return an error if no Patterns matched. If successful,
|
||||
// the function returns a mapping from field paths to their captured values.
|
||||
//
|
||||
// Deprecated: Use MatchAndEscape.
|
||||
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
|
||||
return p.MatchAndEscape(components, verb, UnescapingModeDefault)
|
||||
}
|
||||
|
||||
// Verb returns the verb part of the Pattern.
|
||||
func (p Pattern) Verb() string { return p.verb }
|
||||
|
||||
func (p Pattern) String() string {
|
||||
var stack []string
|
||||
for _, op := range p.ops {
|
||||
switch op.code {
|
||||
case utilities.OpNop:
|
||||
continue
|
||||
case utilities.OpPush:
|
||||
stack = append(stack, "*")
|
||||
case utilities.OpLitPush:
|
||||
stack = append(stack, p.pool[op.operand])
|
||||
case utilities.OpPushM:
|
||||
stack = append(stack, "**")
|
||||
case utilities.OpConcatN:
|
||||
n := op.operand
|
||||
l := len(stack) - n
|
||||
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||
case utilities.OpCapture:
|
||||
n := len(stack) - 1
|
||||
stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
|
||||
}
|
||||
}
|
||||
segs := strings.Join(stack, "/")
|
||||
if p.verb != "" {
|
||||
return fmt.Sprintf("/%s:%s", segs, p.verb)
|
||||
}
|
||||
return "/" + segs
|
||||
}
|
||||
|
||||
/*
|
||||
* The following code is adopted and modified from Go's standard library
|
||||
* and carries the attached license.
|
||||
*
|
||||
* Copyright 2009 The Go Authors. All rights reserved.
|
||||
* Use of this source code is governed by a BSD-style
|
||||
* license that can be found in the LICENSE file.
|
||||
*/
|
||||
|
||||
// ishex returns whether or not the given byte is a valid hex character
|
||||
func ishex(c byte) bool {
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
case 'a' <= c && c <= 'f':
|
||||
return true
|
||||
case 'A' <= c && c <= 'F':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isRFC6570Reserved(c byte) bool {
|
||||
switch c {
|
||||
case '!', '#', '$', '&', '\'', '(', ')', '*',
|
||||
'+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// unhex converts a hex point to the bit representation
|
||||
func unhex(c byte) byte {
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return c - '0'
|
||||
case 'a' <= c && c <= 'f':
|
||||
return c - 'a' + 10
|
||||
case 'A' <= c && c <= 'F':
|
||||
return c - 'A' + 10
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// shouldUnescapeWithMode returns true if the character is escapable with the
|
||||
// given mode
|
||||
func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
|
||||
switch mode {
|
||||
case UnescapingModeAllExceptReserved:
|
||||
if isRFC6570Reserved(c) {
|
||||
return false
|
||||
}
|
||||
case UnescapingModeAllExceptSlash:
|
||||
if c == '/' {
|
||||
return false
|
||||
}
|
||||
case UnescapingModeAllCharacters:
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// unescape unescapes a path string using the provided mode
|
||||
func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
|
||||
// TODO(v3): remove UnescapingModeLegacy
|
||||
if mode == UnescapingModeLegacy {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
if !multisegment {
|
||||
mode = UnescapingModeAllCharacters
|
||||
}
|
||||
|
||||
// Count %, check that they're well-formed.
|
||||
n := 0
|
||||
for i := 0; i < len(s); {
|
||||
if s[i] == '%' {
|
||||
n++
|
||||
if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
|
||||
s = s[i:]
|
||||
if len(s) > 3 {
|
||||
s = s[:3]
|
||||
}
|
||||
|
||||
return "", MalformedSequenceError(s)
|
||||
}
|
||||
i += 3
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
var t strings.Builder
|
||||
t.Grow(len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '%':
|
||||
c := unhex(s[i+1])<<4 | unhex(s[i+2])
|
||||
if shouldUnescapeWithMode(c, mode) {
|
||||
t.WriteByte(c)
|
||||
i += 2
|
||||
continue
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
t.WriteByte(s[i])
|
||||
}
|
||||
}
|
||||
|
||||
return t.String(), nil
|
||||
}
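To make the opcode machinery concrete, here is a small hedged sketch (not part of this diff) that hand-builds the equivalent of the template `/v1/{name}` and matches a path against it. In real gateway code these op sequences come from the httprule compiler or generated stubs; the opcode constants live in the utilities package vendored further below.

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	// Pattern for "/v1/{name}": match the literal "v1", then capture the
	// next path component under the variable "name".
	pat := runtime.MustPattern(runtime.NewPattern(
		1, // version must be 1
		[]int{
			int(utilities.OpLitPush), 0, // literal pool[0] == "v1"
			int(utilities.OpPush), 0, // any single component
			int(utilities.OpConcatN), 1, // collapse the pushed component into one value
			int(utilities.OpCapture), 1, // bind it to pool[1] == "name"
		},
		[]string{"v1", "name"},
		"", // no verb
	))

	vars, err := pat.MatchAndEscape([]string{"v1", "alice"}, "", runtime.UnescapingModeDefault)
	fmt.Println(vars, err) // map[name:alice] <nil>
}
```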
|
80 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go generated vendored Normal file
|
@@ -0,0 +1,80 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// StringP returns a pointer to a string whose pointee is same as the given string value.
|
||||
func StringP(val string) (*string, error) {
|
||||
return proto.String(val), nil
|
||||
}
|
||||
|
||||
// BoolP parses the given string representation of a boolean value,
|
||||
// and returns a pointer to a bool whose value is same as the parsed value.
|
||||
func BoolP(val string) (*bool, error) {
|
||||
b, err := Bool(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Bool(b), nil
|
||||
}
|
||||
|
||||
// Float64P parses the given string representation of a floating point number,
|
||||
// and returns a pointer to a float64 whose value is same as the parsed number.
|
||||
func Float64P(val string) (*float64, error) {
|
||||
f, err := Float64(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Float64(f), nil
|
||||
}
|
||||
|
||||
// Float32P parses the given string representation of a floating point number,
|
||||
// and returns a pointer to a float32 whose value is same as the parsed number.
|
||||
func Float32P(val string) (*float32, error) {
|
||||
f, err := Float32(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Float32(f), nil
|
||||
}
|
||||
|
||||
// Int64P parses the given string representation of an integer
|
||||
// and returns a pointer to a int64 whose value is same as the parsed integer.
|
||||
func Int64P(val string) (*int64, error) {
|
||||
i, err := Int64(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Int64(i), nil
|
||||
}
|
||||
|
||||
// Int32P parses the given string representation of an integer
|
||||
// and returns a pointer to a int32 whose value is same as the parsed integer.
|
||||
func Int32P(val string) (*int32, error) {
|
||||
i, err := Int32(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Int32(i), err
|
||||
}
|
||||
|
||||
// Uint64P parses the given string representation of an integer
|
||||
// and returns a pointer to a uint64 whose value is same as the parsed integer.
|
||||
func Uint64P(val string) (*uint64, error) {
|
||||
i, err := Uint64(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Uint64(i), err
|
||||
}
|
||||
|
||||
// Uint32P parses the given string representation of an integer
|
||||
// and returns a pointer to a uint32 whose value is same as the parsed integer.
|
||||
func Uint32P(val string) (*uint32, error) {
|
||||
i, err := Uint32(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Uint32(i), err
|
||||
}
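These helpers simply parse a string and hand back a pointer, which is what proto2-style optional fields need when populated from path or query strings; a tiny hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	enabled, err := runtime.BoolP("true") // *bool pointing at true
	if err != nil {
		panic(err)
	}
	ratio, _ := runtime.Float64P("0.75") // *float64 pointing at 0.75
	fmt.Println(*enabled, *ratio)        // true 0.75
}
```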
|
329 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go generated vendored Normal file
|
@@ -0,0 +1,329 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/genproto/protobuf/field_mask"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
"google.golang.org/protobuf/types/known/wrapperspb"
|
||||
)
|
||||
|
||||
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
|
||||
|
||||
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
|
||||
|
||||
// QueryParameterParser defines interface for all query parameter parsers
|
||||
type QueryParameterParser interface {
|
||||
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
|
||||
}
|
||||
|
||||
// PopulateQueryParameters parses query parameters
|
||||
// into "msg" using current query parser
|
||||
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||
return currentQueryParser.Parse(msg, values, filter)
|
||||
}
|
||||
|
||||
type defaultQueryParser struct{}
|
||||
|
||||
// Parse populates "values" into "msg".
|
||||
// A value is ignored if its key starts with one of the elements in "filter".
|
||||
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||
for key, values := range values {
|
||||
match := valuesKeyRegexp.FindStringSubmatch(key)
|
||||
if len(match) == 3 {
|
||||
key = match[1]
|
||||
values = append([]string{match[2]}, values...)
|
||||
}
|
||||
fieldPath := strings.Split(key, ".")
|
||||
if filter.HasCommonPrefix(fieldPath) {
|
||||
continue
|
||||
}
|
||||
if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
|
||||
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
|
||||
fieldPath := strings.Split(fieldPathString, ".")
|
||||
return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
|
||||
}
|
||||
|
||||
func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
|
||||
if len(fieldPath) < 1 {
|
||||
return errors.New("no field path")
|
||||
}
|
||||
if len(values) < 1 {
|
||||
return errors.New("no value provided")
|
||||
}
|
||||
|
||||
var fieldDescriptor protoreflect.FieldDescriptor
|
||||
for i, fieldName := range fieldPath {
|
||||
fields := msgValue.Descriptor().Fields()
|
||||
|
||||
// Get field by name
|
||||
fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
|
||||
if fieldDescriptor == nil {
|
||||
fieldDescriptor = fields.ByJSONName(fieldName)
|
||||
if fieldDescriptor == nil {
|
||||
// We're not returning an error here because this could just be
|
||||
// an extra query parameter that isn't part of the request.
|
||||
grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// If this is the last element, we're done
|
||||
if i == len(fieldPath)-1 {
|
||||
break
|
||||
}
|
||||
|
||||
// Only singular message fields are allowed
|
||||
if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
|
||||
return fmt.Errorf("invalid path: %q is not a message", fieldName)
|
||||
}
|
||||
|
||||
// Get the nested message
|
||||
msgValue = msgValue.Mutable(fieldDescriptor).Message()
|
||||
}
|
||||
|
||||
// Check if oneof already set
|
||||
if of := fieldDescriptor.ContainingOneof(); of != nil {
|
||||
if f := msgValue.WhichOneof(of); f != nil {
|
||||
return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case fieldDescriptor.IsList():
|
||||
return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
|
||||
case fieldDescriptor.IsMap():
|
||||
return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
|
||||
}
|
||||
|
||||
if len(values) > 1 {
|
||||
return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
|
||||
}
|
||||
|
||||
return populateField(fieldDescriptor, msgValue, values[0])
|
||||
}
|
||||
|
||||
func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
|
||||
v, err := parseField(fieldDescriptor, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
|
||||
}
|
||||
|
||||
msgValue.Set(fieldDescriptor, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
|
||||
for _, value := range values {
|
||||
v, err := parseField(fieldDescriptor, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
|
||||
}
|
||||
list.Append(v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
|
||||
if len(values) != 2 {
|
||||
return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName())
|
||||
}
|
||||
|
||||
key, err := parseField(fieldDescriptor.MapKey(), values[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
|
||||
}
|
||||
|
||||
value, err := parseField(fieldDescriptor.MapValue(), values[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
|
||||
}
|
||||
|
||||
mp.Set(key.MapKey(), value)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
|
||||
switch fieldDescriptor.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
v, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
return protoreflect.ValueOfBool(v), nil
|
||||
case protoreflect.EnumKind:
|
||||
enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
|
||||
switch {
|
||||
case errors.Is(err, protoregistry.NotFound):
|
||||
return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
|
||||
case err != nil:
|
||||
return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
|
||||
}
|
||||
// Look for enum by name
|
||||
v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
|
||||
if v == nil {
|
||||
i, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
|
||||
}
|
||||
// Look for enum by number
|
||||
v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i))
|
||||
if v == nil {
|
||||
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
|
||||
}
|
||||
}
|
||||
return protoreflect.ValueOfEnum(v.Number()), nil
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||
v, err := strconv.ParseInt(value, 10, 32)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
return protoreflect.ValueOfInt32(int32(v)), nil
|
||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
v, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
return protoreflect.ValueOfInt64(v), nil
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||
v, err := strconv.ParseUint(value, 10, 32)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
return protoreflect.ValueOfUint32(uint32(v)), nil
|
||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
v, err := strconv.ParseUint(value, 10, 64)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
return protoreflect.ValueOfUint64(v), nil
|
||||
case protoreflect.FloatKind:
|
||||
v, err := strconv.ParseFloat(value, 32)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
return protoreflect.ValueOfFloat32(float32(v)), nil
|
||||
case protoreflect.DoubleKind:
|
||||
v, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
return protoreflect.ValueOfFloat64(v), nil
|
||||
case protoreflect.StringKind:
|
||||
return protoreflect.ValueOfString(value), nil
|
||||
case protoreflect.BytesKind:
|
||||
v, err := base64.URLEncoding.DecodeString(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
return protoreflect.ValueOfBytes(v), nil
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
return parseMessage(fieldDescriptor.Message(), value)
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
|
||||
var msg proto.Message
|
||||
switch msgDescriptor.FullName() {
|
||||
case "google.protobuf.Timestamp":
|
||||
if value == "null" {
|
||||
break
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339Nano, value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = timestamppb.New(t)
|
||||
case "google.protobuf.Duration":
|
||||
if value == "null" {
|
||||
break
|
||||
}
|
||||
d, err := time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = durationpb.New(d)
|
||||
case "google.protobuf.DoubleValue":
|
||||
v, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.DoubleValue{Value: v}
|
||||
case "google.protobuf.FloatValue":
|
||||
v, err := strconv.ParseFloat(value, 32)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.FloatValue{Value: float32(v)}
|
||||
case "google.protobuf.Int64Value":
|
||||
v, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.Int64Value{Value: v}
|
||||
case "google.protobuf.Int32Value":
|
||||
v, err := strconv.ParseInt(value, 10, 32)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.Int32Value{Value: int32(v)}
|
||||
case "google.protobuf.UInt64Value":
|
||||
v, err := strconv.ParseUint(value, 10, 64)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.UInt64Value{Value: v}
|
||||
case "google.protobuf.UInt32Value":
|
||||
v, err := strconv.ParseUint(value, 10, 32)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.UInt32Value{Value: uint32(v)}
|
||||
case "google.protobuf.BoolValue":
|
||||
v, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.BoolValue{Value: v}
|
||||
case "google.protobuf.StringValue":
|
||||
msg = &wrapperspb.StringValue{Value: value}
|
||||
case "google.protobuf.BytesValue":
|
||||
v, err := base64.URLEncoding.DecodeString(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.BytesValue{Value: v}
|
||||
case "google.protobuf.FieldMask":
|
||||
fm := &field_mask.FieldMask{}
|
||||
fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
|
||||
msg = fm
|
||||
default:
|
||||
return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
|
||||
}
|
||||
|
||||
return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
|
||||
}
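A hedged sketch of the query parser in isolation (not part of this diff): it uses a well-known wrapper type as the target message so the snippet stays self-contained, whereas gateway code passes the generated request message.

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := &wrapperspb.StringValue{}

	// Populate msg.Value from ?value=hello; the empty DoubleArray filters nothing out.
	err := runtime.PopulateQueryParameters(msg, url.Values{"value": {"hello"}}, utilities.NewDoubleArray(nil))
	fmt.Println(msg.GetValue(), err) // hello <nil>
}
```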
|
27 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel generated vendored Normal file
|
@@ -0,0 +1,27 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
go_library(
|
||||
name = "utilities",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"pattern.go",
|
||||
"readerfactory.go",
|
||||
"trie.go",
|
||||
],
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "utilities_test",
|
||||
size = "small",
|
||||
srcs = ["trie_test.go"],
|
||||
deps = [":utilities"],
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "go_default_library",
|
||||
actual = ":utilities",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
2 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go generated vendored Normal file
|
@@ -0,0 +1,2 @@
|
|||
// Package utilities provides members for internal use in grpc-gateway.
|
||||
package utilities
|
22 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go generated vendored Normal file
|
@@ -0,0 +1,22 @@
|
|||
package utilities
|
||||
|
||||
// An OpCode is a opcode of compiled path patterns.
|
||||
type OpCode int
|
||||
|
||||
// These constants are the valid values of OpCode.
|
||||
const (
|
||||
// OpNop does nothing
|
||||
OpNop = OpCode(iota)
|
||||
// OpPush pushes a component to stack
|
||||
OpPush
|
||||
// OpLitPush pushes a component to stack if it matches to the literal
|
||||
OpLitPush
|
||||
// OpPushM concatenates the remaining components and pushes it to stack
|
||||
OpPushM
|
||||
// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
|
||||
OpConcatN
|
||||
// OpCapture pops an item and binds it to the variable
|
||||
OpCapture
|
||||
// OpEnd is the least positive invalid opcode.
|
||||
OpEnd
|
||||
)
|
20 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go generated vendored Normal file
|
@@ -0,0 +1,20 @@
|
|||
package utilities
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
|
||||
// at the start of the stream
|
||||
func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return func() io.Reader {
|
||||
return bytes.NewReader(b)
|
||||
}, nil
|
||||
}
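The factory buffers the reader once so the same stream can be consumed repeatedly, for example to unmarshal a request body and still log it afterwards; a minimal hedged sketch:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	newReader, err := utilities.IOReaderFactory(strings.NewReader(`{"id": 1}`))
	if err != nil {
		panic(err)
	}

	// Each call returns a fresh reader positioned at the start of the buffered stream.
	first, _ := io.ReadAll(newReader())
	second, _ := io.ReadAll(newReader())
	fmt.Println(string(first) == string(second)) // true
}
```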
|
174 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go generated vendored Normal file
|
@@ -0,0 +1,174 @@
|
|||
package utilities
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// DoubleArray is a Double Array implementation of trie on sequences of strings.
|
||||
type DoubleArray struct {
|
||||
// Encoding keeps an encoding from string to int
|
||||
Encoding map[string]int
|
||||
// Base is the base array of Double Array
|
||||
Base []int
|
||||
// Check is the check array of Double Array
|
||||
Check []int
|
||||
}
|
||||
|
||||
// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
|
||||
func NewDoubleArray(seqs [][]string) *DoubleArray {
|
||||
da := &DoubleArray{Encoding: make(map[string]int)}
|
||||
if len(seqs) == 0 {
|
||||
return da
|
||||
}
|
||||
|
||||
encoded := registerTokens(da, seqs)
|
||||
sort.Sort(byLex(encoded))
|
||||
|
||||
root := node{row: -1, col: -1, left: 0, right: len(encoded)}
|
||||
addSeqs(da, encoded, 0, root)
|
||||
|
||||
for i := len(da.Base); i > 0; i-- {
|
||||
if da.Check[i-1] != 0 {
|
||||
da.Base = da.Base[:i]
|
||||
da.Check = da.Check[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
return da
|
||||
}
|
||||
|
||||
func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
|
||||
var result [][]int
|
||||
for _, seq := range seqs {
|
||||
var encoded []int
|
||||
for _, token := range seq {
|
||||
if _, ok := da.Encoding[token]; !ok {
|
||||
da.Encoding[token] = len(da.Encoding)
|
||||
}
|
||||
encoded = append(encoded, da.Encoding[token])
|
||||
}
|
||||
result = append(result, encoded)
|
||||
}
|
||||
for i := range result {
|
||||
result[i] = append(result[i], len(da.Encoding))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
type node struct {
|
||||
row, col int
|
||||
left, right int
|
||||
}
|
||||
|
||||
func (n node) value(seqs [][]int) int {
|
||||
return seqs[n.row][n.col]
|
||||
}
|
||||
|
||||
func (n node) children(seqs [][]int) []*node {
|
||||
var result []*node
|
||||
lastVal := int(-1)
|
||||
last := new(node)
|
||||
for i := n.left; i < n.right; i++ {
|
||||
if lastVal == seqs[i][n.col+1] {
|
||||
continue
|
||||
}
|
||||
last.right = i
|
||||
last = &node{
|
||||
row: i,
|
||||
col: n.col + 1,
|
||||
left: i,
|
||||
}
|
||||
result = append(result, last)
|
||||
}
|
||||
last.right = n.right
|
||||
return result
|
||||
}
|
||||
|
||||
func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
|
||||
ensureSize(da, pos)
|
||||
|
||||
children := n.children(seqs)
|
||||
var i int
|
||||
for i = 1; ; i++ {
|
||||
ok := func() bool {
|
||||
for _, child := range children {
|
||||
code := child.value(seqs)
|
||||
j := i + code
|
||||
ensureSize(da, j)
|
||||
if da.Check[j] != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}()
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
da.Base[pos] = i
|
||||
for _, child := range children {
|
||||
code := child.value(seqs)
|
||||
j := i + code
|
||||
da.Check[j] = pos + 1
|
||||
}
|
||||
terminator := len(da.Encoding)
|
||||
for _, child := range children {
|
||||
code := child.value(seqs)
|
||||
if code == terminator {
|
||||
continue
|
||||
}
|
||||
j := i + code
|
||||
addSeqs(da, seqs, j, *child)
|
||||
}
|
||||
}
|
||||
|
||||
func ensureSize(da *DoubleArray, i int) {
|
||||
for i >= len(da.Base) {
|
||||
da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
|
||||
da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
|
||||
}
|
||||
}
|
||||
|
||||
type byLex [][]int
|
||||
|
||||
func (l byLex) Len() int { return len(l) }
|
||||
func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l byLex) Less(i, j int) bool {
|
||||
si := l[i]
|
||||
sj := l[j]
|
||||
var k int
|
||||
for k = 0; k < len(si) && k < len(sj); k++ {
|
||||
if si[k] < sj[k] {
|
||||
return true
|
||||
}
|
||||
if si[k] > sj[k] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return k < len(sj)
|
||||
}
|
||||
|
||||
// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
|
||||
func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
|
||||
if len(da.Base) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var i int
|
||||
for _, t := range seq {
|
||||
code, ok := da.Encoding[t]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
j := da.Base[i] + code
|
||||
if len(da.Check) <= j || da.Check[j] != i+1 {
|
||||
break
|
||||
}
|
||||
i = j
|
||||
}
|
||||
j := da.Base[i] + len(da.Encoding)
|
||||
if len(da.Check) <= j || da.Check[j] != i+1 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
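The gateway uses this trie to skip query parameters whose fields were already bound from the path or body; a short hedged sketch of the prefix check (field paths are illustrative):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	// Field paths that should be filtered out of query-parameter parsing.
	filter := utilities.NewDoubleArray([][]string{
		{"book", "name"},
		{"parent"},
	})

	fmt.Println(filter.HasCommonPrefix([]string{"book", "name", "suffix"})) // true: "book.name" is a registered prefix
	fmt.Println(filter.HasCommonPrefix([]string{"book", "author"}))         // false: no registered sequence is a prefix
}
```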
|
24 vendor/github.com/uptrace/bun/extra/bunotel/LICENSE generated vendored Normal file
|
@@ -0,0 +1,24 @@
|
|||
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
3 vendor/github.com/uptrace/bun/extra/bunotel/README.md generated vendored Normal file
|
@@ -0,0 +1,3 @@
|
|||
# OpenTelemetry instrumentation for Bun
|
||||
|
||||
See [example](../example/opentelemetry) for details.
|
32 vendor/github.com/uptrace/bun/extra/bunotel/option.go generated vendored Normal file
|
@@ -0,0 +1,32 @@
|
|||
package bunotel
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
|
||||
)
|
||||
|
||||
type Option func(h *QueryHook)
|
||||
|
||||
// WithAttributes configures attributes that are used to create a span.
|
||||
func WithAttributes(attrs ...attribute.KeyValue) Option {
|
||||
return func(h *QueryHook) {
|
||||
h.attrs = append(h.attrs, attrs...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithDBName configures a db.name attribute.
|
||||
func WithDBName(name string) Option {
|
||||
return func(h *QueryHook) {
|
||||
h.attrs = append(h.attrs, semconv.DBNameKey.String(name))
|
||||
}
|
||||
}
|
||||
|
||||
// WithFormattedQueries enables formatting of the query that is added
|
||||
// as the statement attribute to the trace.
|
||||
// This means that all placeholders and arguments will be filled first
|
||||
// and the query will contain all information as sent to the database.
|
||||
func WithFormattedQueries(format bool) Option {
|
||||
return func(h *QueryHook) {
|
||||
h.formatQueries = format
|
||||
}
|
||||
}
|
188 vendor/github.com/uptrace/bun/extra/bunotel/otel.go generated vendored Normal file
|
@@ -0,0 +1,188 @@
|
|||
package bunotel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/metric/global"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/dialect"
|
||||
"github.com/uptrace/bun/schema"
|
||||
"github.com/uptrace/opentelemetry-go-extra/otelsql"
|
||||
)
|
||||
|
||||
var (
|
||||
tracer = otel.Tracer("github.com/uptrace/bun")
|
||||
meter = global.Meter("github.com/uptrace/bun")
|
||||
|
||||
queryHistogram, _ = meter.Int64Histogram(
|
||||
"go.sql.query_timing",
|
||||
instrument.WithDescription("Timing of processed queries"),
|
||||
instrument.WithUnit("milliseconds"),
|
||||
)
|
||||
)
|
||||
|
||||
type QueryHook struct {
|
||||
attrs []attribute.KeyValue
|
||||
formatQueries bool
|
||||
}
|
||||
|
||||
var _ bun.QueryHook = (*QueryHook)(nil)
|
||||
|
||||
func NewQueryHook(opts ...Option) *QueryHook {
|
||||
h := new(QueryHook)
|
||||
for _, opt := range opts {
|
||||
opt(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *QueryHook) Init(db *bun.DB) {
|
||||
labels := make([]attribute.KeyValue, 0, len(h.attrs)+1)
|
||||
labels = append(labels, h.attrs...)
|
||||
if sys := dbSystem(db); sys.Valid() {
|
||||
labels = append(labels, sys)
|
||||
}
|
||||
|
||||
otelsql.ReportDBStatsMetrics(db.DB, otelsql.WithAttributes(labels...))
|
||||
}
|
||||
|
||||
func (h *QueryHook) BeforeQuery(ctx context.Context, event *bun.QueryEvent) context.Context {
|
||||
ctx, _ = tracer.Start(ctx, "", trace.WithSpanKind(trace.SpanKindClient))
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (h *QueryHook) AfterQuery(ctx context.Context, event *bun.QueryEvent) {
|
||||
operation := event.Operation()
|
||||
dbOperation := semconv.DBOperationKey.String(operation)
|
||||
|
||||
labels := make([]attribute.KeyValue, 0, len(h.attrs)+2)
|
||||
labels = append(labels, h.attrs...)
|
||||
labels = append(labels, dbOperation)
|
||||
if event.IQuery != nil {
|
||||
if tableName := event.IQuery.GetTableName(); tableName != "" {
|
||||
labels = append(labels, semconv.DBSQLTableKey.String(tableName))
|
||||
}
|
||||
}
|
||||
|
||||
queryHistogram.Record(ctx, time.Since(event.StartTime).Milliseconds(), labels...)
|
||||
|
||||
span := trace.SpanFromContext(ctx)
|
||||
if !span.IsRecording() {
|
||||
return
|
||||
}
|
||||
|
||||
span.SetName(operation)
|
||||
defer span.End()
|
||||
|
||||
query := h.eventQuery(event)
|
||||
fn, file, line := funcFileLine("github.com/uptrace/bun")
|
||||
|
||||
attrs := make([]attribute.KeyValue, 0, 10)
|
||||
attrs = append(attrs, h.attrs...)
|
||||
attrs = append(attrs,
|
||||
dbOperation,
|
||||
semconv.DBStatementKey.String(query),
|
||||
semconv.CodeFunctionKey.String(fn),
|
||||
semconv.CodeFilepathKey.String(file),
|
||||
semconv.CodeLineNumberKey.Int(line),
|
||||
)
|
||||
|
||||
if sys := dbSystem(event.DB); sys.Valid() {
|
||||
attrs = append(attrs, sys)
|
||||
}
|
||||
if event.Result != nil {
|
||||
if n, _ := event.Result.RowsAffected(); n > 0 {
|
||||
attrs = append(attrs, attribute.Int64("db.rows_affected", n))
|
||||
}
|
||||
}
|
||||
|
||||
switch event.Err {
|
||||
case nil, sql.ErrNoRows, sql.ErrTxDone:
|
||||
// ignore
|
||||
default:
|
||||
span.RecordError(event.Err)
|
||||
span.SetStatus(codes.Error, event.Err.Error())
|
||||
}
|
||||
|
||||
span.SetAttributes(attrs...)
|
||||
}
|
||||
|
||||
func funcFileLine(pkg string) (string, string, int) {
|
||||
const depth = 16
|
||||
var pcs [depth]uintptr
|
||||
n := runtime.Callers(3, pcs[:])
|
||||
ff := runtime.CallersFrames(pcs[:n])
|
||||
|
||||
var fn, file string
|
||||
var line int
|
||||
for {
|
||||
f, ok := ff.Next()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fn, file, line = f.Function, f.File, f.Line
|
||||
if !strings.Contains(fn, pkg) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if ind := strings.LastIndexByte(fn, '/'); ind != -1 {
|
||||
fn = fn[ind+1:]
|
||||
}
|
||||
|
||||
return fn, file, line
|
||||
}
|
||||
|
||||
func (h *QueryHook) eventQuery(event *bun.QueryEvent) string {
|
||||
const softQueryLimit = 8000
|
||||
const hardQueryLimit = 16000
|
||||
|
||||
var query string
|
||||
|
||||
if h.formatQueries && len(event.Query) <= softQueryLimit {
|
||||
query = event.Query
|
||||
} else {
|
||||
query = unformattedQuery(event)
|
||||
}
|
||||
|
||||
if len(query) > hardQueryLimit {
|
||||
query = query[:hardQueryLimit]
|
||||
}
|
||||
|
||||
return query
|
||||
}
|
||||
|
||||
func unformattedQuery(event *bun.QueryEvent) string {
|
||||
if event.IQuery != nil {
|
||||
if b, err := event.IQuery.AppendQuery(schema.NewNopFormatter(), nil); err == nil {
|
||||
return bytesToString(b)
|
||||
}
|
||||
}
|
||||
return string(event.QueryTemplate)
|
||||
}
|
||||
|
||||
func dbSystem(db *bun.DB) attribute.KeyValue {
|
||||
switch db.Dialect().Name() {
|
||||
case dialect.PG:
|
||||
return semconv.DBSystemPostgreSQL
|
||||
case dialect.MySQL:
|
||||
return semconv.DBSystemMySQL
|
||||
case dialect.SQLite:
|
||||
return semconv.DBSystemSqlite
|
||||
case dialect.MSSQL:
|
||||
return semconv.DBSystemMSSQL
|
||||
default:
|
||||
return attribute.KeyValue{}
|
||||
}
|
||||
}
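This query hook is what the tracing support presumably attaches to the bun database connection; as a minimal hedged sketch (driver, dialect, DSN, and db name below are illustrative, not taken from this diff):

```go
package main

import (
	"database/sql"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/sqlitedialect"
	"github.com/uptrace/bun/driver/sqliteshim"
	"github.com/uptrace/bun/extra/bunotel"
)

func openTracedDB() (*bun.DB, error) {
	// Illustrative in-memory SQLite connection.
	sqldb, err := sql.Open(sqliteshim.ShimName, "file::memory:?cache=shared")
	if err != nil {
		return nil, err
	}

	db := bun.NewDB(sqldb, sqlitedialect.New())

	// Record spans and query-timing metrics for every query issued through this *bun.DB.
	db.AddQueryHook(bunotel.NewQueryHook(
		bunotel.WithDBName("gotosocial"),
		bunotel.WithFormattedQueries(false),
	))

	return db, nil
}
```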
|
11 vendor/github.com/uptrace/bun/extra/bunotel/safe.go generated vendored Normal file
|
@@ -0,0 +1,11 @@
|
|||
// +build appengine
|
||||
|
||||
package internal
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func stringToBytes(s string) []byte {
|
||||
return []byte(s)
|
||||
}
|
18 vendor/github.com/uptrace/bun/extra/bunotel/unsafe.go generated vendored Normal file
|
@@ -0,0 +1,18 @@
|
|||
// +build !appengine
|
||||
|
||||
package bunotel
|
||||
|
||||
import "unsafe"
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
return *(*string)(unsafe.Pointer(&b))
|
||||
}
|
||||
|
||||
func stringToBytes(s string) []byte {
|
||||
return *(*[]byte)(unsafe.Pointer(
|
||||
&struct {
|
||||
string
|
||||
Cap int
|
||||
}{s, len(s)},
|
||||
))
|
||||
}
|
5 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/.golangci.yml generated vendored Normal file
|
@@ -0,0 +1,5 @@
|
|||
issues:
|
||||
exclude-rules:
|
||||
- text: 'Drivers should implement'
|
||||
linters:
|
||||
- staticcheck
|
24 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/LICENSE generated vendored Normal file
|
@@ -0,0 +1,24 @@
|
|||
Copyright (c) 2020 github.com/uptrace/opentelemetry-go-extra Contributors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
118 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/README.md generated vendored Normal file
|
@@ -0,0 +1,118 @@
|
|||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/uptrace/opentelemetry-go-extra/otelsql)](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql)

# database/sql instrumentation for OpenTelemetry Go

[database/sql OpenTelemetry instrumentation](https://uptrace.dev/opentelemetry/instrumentations/go-database-sql.html)
records database queries (including `Tx` and `Stmt` queries) and reports `DBStats` metrics.

## Installation

```shell
go get github.com/uptrace/opentelemetry-go-extra/otelsql
```

## Usage

To instrument database/sql, you need to connect to a database using the API provided by otelsql:

| sql                         | otelsql                         |
| --------------------------- | ------------------------------- |
| `sql.Open(driverName, dsn)` | `otelsql.Open(driverName, dsn)` |
| `sql.OpenDB(connector)`     | `otelsql.OpenDB(connector)`     |

```go
import (
	"github.com/uptrace/opentelemetry-go-extra/otelsql"
	semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)

db, err := otelsql.Open("sqlite", "file::memory:?cache=shared",
	otelsql.WithAttributes(semconv.DBSystemSqlite),
	otelsql.WithDBName("mydb"))
if err != nil {
	panic(err)
}

// db is *sql.DB
```

And then use context-aware API to propagate the active span via
[context](https://uptrace.dev/opentelemetry/go-tracing.html#context):

```go
var num int
if err := db.QueryRowContext(ctx, "SELECT 42").Scan(&num); err != nil {
	panic(err)
}
```

See [example](/example/) for details.

## Options

Both [otelsql.Open](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#Open) and
[otelsql.OpenDB](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#OpenDB) accept
the same [options](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#Option):

- [WithAttributes](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#WithAttributes)
  configures attributes that are used to create a span.
- [WithDBName](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#WithDBName)
  configures a `db.name` attribute.
- [WithDBSystem](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#WithDBSystem)
  configures a `db.system` attribute. When possible, you should prefer using WithAttributes and
  [semconv](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.10.0), for example,
  `otelsql.WithAttributes(semconv.DBSystemSqlite)`.

## sqlboiler

You can use otelsql to instrument [sqlboiler](https://github.com/volatiletech/sqlboiler) ORM:

```go
import (
	"github.com/uptrace/opentelemetry-go-extra/otelsql"
	semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)

db, err := otelsql.Open("postgres", "dbname=fun user=abc",
	otelsql.WithAttributes(semconv.DBSystemPostgreSQL))
if err != nil {
	return err
}

boil.SetDB(db)
```

## GORM 1

You can use otelsql to instrument [GORM 1](https://v1.gorm.io/):

```go
import (
	"github.com/jinzhu/gorm"
	"github.com/uptrace/opentelemetry-go-extra/otelsql"
	semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)

// gormOpen is like gorm.Open, but it uses otelsql to instrument the database.
func gormOpen(driverName, dataSourceName string, opts ...otelsql.Option) (*gorm.DB, error) {
	db, err := otelsql.Open(driverName, dataSourceName, opts...)
	if err != nil {
		return nil, err
	}
	return gorm.Open(driverName, db)
}

db, err := gormOpen("mysql", "user:password@/dbname",
	otelsql.WithAttributes(semconv.DBSystemMySQL))
if err != nil {
	panic(err)
}
```

To instrument GORM 2, use
[otelgorm](https://github.com/uptrace/opentelemetry-go-extra/tree/main/otelgorm).

## Alternatives

- https://github.com/XSAM/otelsql - different driver registration and no metrics.
- https://github.com/j2gg0s/otsql - like XSAM/otelsql but with Prometheus metrics.
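As an aside (not part of the vendored README): since `Tx` and `Stmt` queries are recorded too, a minimal sketch of a transaction run through the context-aware API might look like the following, assuming `db` was opened with `otelsql.Open` as above; the `users` table is hypothetical.

```go
import (
	"context"
	"database/sql"
)

// countUsers runs a query inside a transaction. BeginTx is recorded as a
// db.Begin span, the query as a db.Query span, and Commit as tx.Commit,
// all children of whatever span is active in ctx.
func countUsers(ctx context.Context, db *sql.DB) (int, error) {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return 0, err
	}
	defer tx.Rollback()

	var n int
	if err := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM users").Scan(&n); err != nil {
		return 0, err
	}
	return n, tx.Commit()
}
```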
460 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/driver.go generated vendored Normal file
@@ -0,0 +1,460 @@
|
|||
package otelsql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// Open is a wrapper over sql.Open that instruments the sql.DB to record executed queries
|
||||
// using OpenTelemetry API.
|
||||
func Open(driverName, dsn string, opts ...Option) (*sql.DB, error) {
|
||||
db, err := sql.Open(driverName, dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return patchDB(db, dsn, opts...)
|
||||
}
|
||||
|
||||
func patchDB(db *sql.DB, dsn string, opts ...Option) (*sql.DB, error) {
|
||||
dbDriver := db.Driver()
|
||||
d := newDriver(dbDriver, opts)
|
||||
|
||||
if _, ok := dbDriver.(driver.DriverContext); ok {
|
||||
connector, err := d.OpenConnector(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sqlOpenDB(connector, d.instrum), nil
|
||||
}
|
||||
|
||||
return sqlOpenDB(&dsnConnector{
|
||||
driver: d,
|
||||
dsn: dsn,
|
||||
}, d.instrum), nil
|
||||
}
|
||||
|
||||
// OpenDB is a wrapper over sql.OpenDB that instruments the sql.DB to record executed queries
|
||||
// using OpenTelemetry API.
|
||||
func OpenDB(connector driver.Connector, opts ...Option) *sql.DB {
|
||||
instrum := newDBInstrum(opts)
|
||||
c := newConnector(connector.Driver(), connector, instrum)
|
||||
return sqlOpenDB(c, instrum)
|
||||
}
|
||||
|
||||
func sqlOpenDB(connector driver.Connector, instrum *dbInstrum) *sql.DB {
|
||||
db := sql.OpenDB(connector)
|
||||
ReportDBStatsMetrics(db, WithMeterProvider(instrum.meterProvider), WithAttributes(instrum.attrs...))
|
||||
return db
|
||||
}
|
||||
|
||||
type dsnConnector struct {
|
||||
driver *otelDriver
|
||||
dsn string
|
||||
}
|
||||
|
||||
func (c *dsnConnector) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
var conn driver.Conn
|
||||
err := c.driver.instrum.withSpan(ctx, "db.Connect", "",
|
||||
func(ctx context.Context, span trace.Span) error {
|
||||
var err error
|
||||
conn, err = c.driver.Open(c.dsn)
|
||||
return err
|
||||
})
|
||||
return conn, err
|
||||
}
|
||||
|
||||
func (c *dsnConnector) Driver() driver.Driver {
|
||||
return c.driver
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type otelDriver struct {
|
||||
driver driver.Driver
|
||||
driverCtx driver.DriverContext
|
||||
instrum *dbInstrum
|
||||
}
|
||||
|
||||
var _ driver.DriverContext = (*otelDriver)(nil)
|
||||
|
||||
func newDriver(dr driver.Driver, opts []Option) *otelDriver {
|
||||
driverCtx, _ := dr.(driver.DriverContext)
|
||||
d := &otelDriver{
|
||||
driver: dr,
|
||||
driverCtx: driverCtx,
|
||||
instrum: newDBInstrum(opts),
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *otelDriver) Open(name string) (driver.Conn, error) {
|
||||
conn, err := d.driver.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newConn(conn, d.instrum), nil
|
||||
}
|
||||
|
||||
func (d *otelDriver) OpenConnector(dsn string) (driver.Connector, error) {
|
||||
connector, err := d.driverCtx.OpenConnector(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newConnector(d, connector, d.instrum), nil
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type connector struct {
|
||||
driver.Connector
|
||||
driver driver.Driver
|
||||
instrum *dbInstrum
|
||||
}
|
||||
|
||||
var _ driver.Connector = (*connector)(nil)
|
||||
|
||||
func newConnector(d driver.Driver, c driver.Connector, instrum *dbInstrum) *connector {
|
||||
return &connector{
|
||||
driver: d,
|
||||
Connector: c,
|
||||
instrum: instrum,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
var conn driver.Conn
|
||||
if err := c.instrum.withSpan(ctx, "db.Connect", "",
|
||||
func(ctx context.Context, span trace.Span) error {
|
||||
var err error
|
||||
conn, err = c.Connector.Connect(ctx)
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newConn(conn, c.instrum), nil
|
||||
}
|
||||
|
||||
func (c *connector) Driver() driver.Driver {
|
||||
return c.driver
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type otelConn struct {
|
||||
driver.Conn
|
||||
|
||||
instrum *dbInstrum
|
||||
|
||||
ping pingFunc
|
||||
exec execFunc
|
||||
execCtx execCtxFunc
|
||||
query queryFunc
|
||||
queryCtx queryCtxFunc
|
||||
prepareCtx prepareCtxFunc
|
||||
beginTx beginTxFunc
|
||||
resetSession resetSessionFunc
|
||||
checkNamedValue checkNamedValueFunc
|
||||
}
|
||||
|
||||
var _ driver.Conn = (*otelConn)(nil)
|
||||
|
||||
func newConn(conn driver.Conn, instrum *dbInstrum) *otelConn {
|
||||
cn := &otelConn{
|
||||
Conn: conn,
|
||||
instrum: instrum,
|
||||
}
|
||||
|
||||
cn.ping = cn.createPingFunc(conn)
|
||||
cn.exec = cn.createExecFunc(conn)
|
||||
cn.execCtx = cn.createExecCtxFunc(conn)
|
||||
cn.query = cn.createQueryFunc(conn)
|
||||
cn.queryCtx = cn.createQueryCtxFunc(conn)
|
||||
cn.prepareCtx = cn.createPrepareCtxFunc(conn)
|
||||
cn.beginTx = cn.createBeginTxFunc(conn)
|
||||
cn.resetSession = cn.createResetSessionFunc(conn)
|
||||
cn.checkNamedValue = cn.createCheckNamedValueFunc(conn)
|
||||
|
||||
return cn
|
||||
}
|
||||
|
||||
var _ driver.Pinger = (*otelConn)(nil)
|
||||
|
||||
func (c *otelConn) Ping(ctx context.Context) error {
|
||||
return c.ping(ctx)
|
||||
}
|
||||
|
||||
type pingFunc func(ctx context.Context) error
|
||||
|
||||
func (c *otelConn) createPingFunc(conn driver.Conn) pingFunc {
|
||||
if pinger, ok := conn.(driver.Pinger); ok {
|
||||
return func(ctx context.Context) error {
|
||||
return c.instrum.withSpan(ctx, "db.Ping", "",
|
||||
func(ctx context.Context, span trace.Span) error {
|
||||
return pinger.Ping(ctx)
|
||||
})
|
||||
}
|
||||
}
|
||||
return func(ctx context.Context) error {
|
||||
return driver.ErrSkip
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
var _ driver.Execer = (*otelConn)(nil)
|
||||
|
||||
func (c *otelConn) Exec(query string, args []driver.Value) (driver.Result, error) {
|
||||
return c.exec(query, args)
|
||||
}
|
||||
|
||||
type execFunc func(query string, args []driver.Value) (driver.Result, error)
|
||||
|
||||
func (c *otelConn) createExecFunc(conn driver.Conn) execFunc {
|
||||
if execer, ok := conn.(driver.Execer); ok {
|
||||
return func(query string, args []driver.Value) (driver.Result, error) {
|
||||
return execer.Exec(query, args)
|
||||
}
|
||||
}
|
||||
return func(query string, args []driver.Value) (driver.Result, error) {
|
||||
return nil, driver.ErrSkip
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
var _ driver.ExecerContext = (*otelConn)(nil)
|
||||
|
||||
func (c *otelConn) ExecContext(
|
||||
ctx context.Context, query string, args []driver.NamedValue,
|
||||
) (driver.Result, error) {
|
||||
return c.execCtx(ctx, query, args)
|
||||
}
|
||||
|
||||
type execCtxFunc func(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error)
|
||||
|
||||
func (c *otelConn) createExecCtxFunc(conn driver.Conn) execCtxFunc {
|
||||
var fn execCtxFunc
|
||||
|
||||
if execer, ok := conn.(driver.ExecerContext); ok {
|
||||
fn = execer.ExecContext
|
||||
} else {
|
||||
fn = func(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
|
||||
vArgs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.exec(query, vArgs)
|
||||
}
|
||||
}
|
||||
|
||||
return func(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
|
||||
var res driver.Result
|
||||
if err := c.instrum.withSpan(ctx, "db.Exec", query,
|
||||
func(ctx context.Context, span trace.Span) error {
|
||||
var err error
|
||||
res, err = fn(ctx, query, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if span.IsRecording() {
|
||||
rows, err := res.RowsAffected()
|
||||
if err == nil {
|
||||
span.SetAttributes(dbRowsAffected.Int64(rows))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
var _ driver.Queryer = (*otelConn)(nil)
|
||||
|
||||
func (c *otelConn) Query(query string, args []driver.Value) (driver.Rows, error) {
|
||||
return c.query(query, args)
|
||||
}
|
||||
|
||||
type queryFunc func(query string, args []driver.Value) (driver.Rows, error)
|
||||
|
||||
func (c *otelConn) createQueryFunc(conn driver.Conn) queryFunc {
|
||||
if queryer, ok := c.Conn.(driver.Queryer); ok {
|
||||
return func(query string, args []driver.Value) (driver.Rows, error) {
|
||||
return queryer.Query(query, args)
|
||||
}
|
||||
}
|
||||
return func(query string, args []driver.Value) (driver.Rows, error) {
|
||||
return nil, driver.ErrSkip
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
var _ driver.QueryerContext = (*otelConn)(nil)
|
||||
|
||||
func (c *otelConn) QueryContext(
|
||||
ctx context.Context, query string, args []driver.NamedValue,
|
||||
) (driver.Rows, error) {
|
||||
return c.queryCtx(ctx, query, args)
|
||||
}
|
||||
|
||||
type queryCtxFunc func(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error)
|
||||
|
||||
func (c *otelConn) createQueryCtxFunc(conn driver.Conn) queryCtxFunc {
|
||||
var fn queryCtxFunc
|
||||
|
||||
if queryer, ok := c.Conn.(driver.QueryerContext); ok {
|
||||
fn = queryer.QueryContext
|
||||
} else {
|
||||
fn = func(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
|
||||
vArgs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.query(query, vArgs)
|
||||
}
|
||||
}
|
||||
|
||||
return func(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
|
||||
var rows driver.Rows
|
||||
err := c.instrum.withSpan(ctx, "db.Query", query,
|
||||
func(ctx context.Context, span trace.Span) error {
|
||||
var err error
|
||||
rows, err = fn(ctx, query, args)
|
||||
return err
|
||||
})
|
||||
return rows, err
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
var _ driver.ConnPrepareContext = (*otelConn)(nil)
|
||||
|
||||
func (c *otelConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
return c.prepareCtx(ctx, query)
|
||||
}
|
||||
|
||||
type prepareCtxFunc func(ctx context.Context, query string) (driver.Stmt, error)
|
||||
|
||||
func (c *otelConn) createPrepareCtxFunc(conn driver.Conn) prepareCtxFunc {
|
||||
var fn prepareCtxFunc
|
||||
|
||||
if preparer, ok := c.Conn.(driver.ConnPrepareContext); ok {
|
||||
fn = preparer.PrepareContext
|
||||
} else {
|
||||
fn = func(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
return c.Conn.Prepare(query)
|
||||
}
|
||||
}
|
||||
|
||||
return func(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
var stmt driver.Stmt
|
||||
if err := c.instrum.withSpan(ctx, "db.Prepare", query,
|
||||
func(ctx context.Context, span trace.Span) error {
|
||||
var err error
|
||||
stmt, err = fn(ctx, query)
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newStmt(stmt, query, c.instrum), nil
|
||||
}
|
||||
}
|
||||
|
||||
var _ driver.ConnBeginTx = (*otelConn)(nil)
|
||||
|
||||
func (c *otelConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
|
||||
return c.beginTx(ctx, opts)
|
||||
}
|
||||
|
||||
type beginTxFunc func(ctx context.Context, opts driver.TxOptions) (driver.Tx, error)
|
||||
|
||||
func (c *otelConn) createBeginTxFunc(conn driver.Conn) beginTxFunc {
|
||||
var fn beginTxFunc
|
||||
|
||||
if txor, ok := conn.(driver.ConnBeginTx); ok {
|
||||
fn = txor.BeginTx
|
||||
} else {
|
||||
fn = func(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
|
||||
return conn.Begin()
|
||||
}
|
||||
}
|
||||
|
||||
return func(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
|
||||
var tx driver.Tx
|
||||
if err := c.instrum.withSpan(ctx, "db.Begin", "",
|
||||
func(ctx context.Context, span trace.Span) error {
|
||||
var err error
|
||||
tx, err = fn(ctx, opts)
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newTx(ctx, tx, c.instrum), nil
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
var _ driver.SessionResetter = (*otelConn)(nil)
|
||||
|
||||
func (c *otelConn) ResetSession(ctx context.Context) error {
|
||||
return c.resetSession(ctx)
|
||||
}
|
||||
|
||||
type resetSessionFunc func(ctx context.Context) error
|
||||
|
||||
func (c *otelConn) createResetSessionFunc(conn driver.Conn) resetSessionFunc {
|
||||
if resetter, ok := c.Conn.(driver.SessionResetter); ok {
|
||||
return func(ctx context.Context) error {
|
||||
return resetter.ResetSession(ctx)
|
||||
}
|
||||
}
|
||||
return func(ctx context.Context) error {
|
||||
return driver.ErrSkip
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
var _ driver.NamedValueChecker = (*otelConn)(nil)
|
||||
|
||||
func (c *otelConn) CheckNamedValue(value *driver.NamedValue) error {
|
||||
return c.checkNamedValue(value)
|
||||
}
|
||||
|
||||
type checkNamedValueFunc func(*driver.NamedValue) error
|
||||
|
||||
func (c *otelConn) createCheckNamedValueFunc(conn driver.Conn) checkNamedValueFunc {
|
||||
if checker, ok := c.Conn.(driver.NamedValueChecker); ok {
|
||||
return func(value *driver.NamedValue) error {
|
||||
return checker.CheckNamedValue(value)
|
||||
}
|
||||
}
|
||||
return func(value *driver.NamedValue) error {
|
||||
return driver.ErrSkip
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
|
||||
args := make([]driver.Value, len(named))
|
||||
for n, param := range named {
|
||||
if len(param.Name) > 0 {
|
||||
return nil, errors.New("otelsql: driver does not support named parameters")
|
||||
}
|
||||
args[n] = param.Value
|
||||
}
|
||||
return args, nil
|
||||
}
|
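The wrapper above repeats one pattern for every optional driver interface: probe the underlying connection with a type assertion, delegate when the interface is implemented, and otherwise fall back to `driver.ErrSkip` (or a non-context variant). A small illustrative sketch of that pattern, written outside the vendored code for `driver.Pinger`:

```go
import (
	"context"
	"database/sql/driver"
)

// wrapPing mirrors createPingFunc above: delegate to the driver's Ping when
// it exists, otherwise report ErrSkip so database/sql uses its own fallback.
func wrapPing(conn driver.Conn) func(ctx context.Context) error {
	if pinger, ok := conn.(driver.Pinger); ok {
		return pinger.Ping
	}
	return func(ctx context.Context) error {
		return driver.ErrSkip
	}
}
```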
254 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/otel.go generated vendored Normal file
@@ -0,0 +1,254 @@
|
|||
package otelsql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/global"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
const instrumName = "github.com/uptrace/opentelemetry-go-extra/otelsql"
|
||||
|
||||
var dbRowsAffected = attribute.Key("db.rows_affected")
|
||||
|
||||
type config struct {
|
||||
tracerProvider trace.TracerProvider
|
||||
tracer trace.Tracer //nolint:structcheck
|
||||
|
||||
meterProvider metric.MeterProvider
|
||||
meter metric.Meter
|
||||
|
||||
attrs []attribute.KeyValue
|
||||
|
||||
queryFormatter func(query string) string
|
||||
}
|
||||
|
||||
func newConfig(opts []Option) *config {
|
||||
c := &config{
|
||||
tracerProvider: otel.GetTracerProvider(),
|
||||
meterProvider: global.MeterProvider(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *config) formatQuery(query string) string {
|
||||
if c.queryFormatter != nil {
|
||||
return c.queryFormatter(query)
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
type dbInstrum struct {
|
||||
*config
|
||||
|
||||
queryHistogram instrument.Int64Histogram
|
||||
}
|
||||
|
||||
func newDBInstrum(opts []Option) *dbInstrum {
|
||||
t := &dbInstrum{
|
||||
config: newConfig(opts),
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
t.tracer = t.tracerProvider.Tracer(instrumName)
|
||||
}
|
||||
if t.meter == nil {
|
||||
t.meter = t.meterProvider.Meter(instrumName)
|
||||
}
|
||||
|
||||
var err error
|
||||
t.queryHistogram, err = t.meter.Int64Histogram(
|
||||
"go.sql.query_timing",
|
||||
instrument.WithDescription("Timing of processed queries"),
|
||||
instrument.WithUnit("milliseconds"),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *dbInstrum) withSpan(
|
||||
ctx context.Context,
|
||||
spanName string,
|
||||
query string,
|
||||
fn func(ctx context.Context, span trace.Span) error,
|
||||
) error {
|
||||
var startTime time.Time
|
||||
if query != "" {
|
||||
startTime = time.Now()
|
||||
}
|
||||
|
||||
attrs := make([]attribute.KeyValue, 0, len(t.attrs)+1)
|
||||
attrs = append(attrs, t.attrs...)
|
||||
if query != "" {
|
||||
attrs = append(attrs, semconv.DBStatementKey.String(t.formatQuery(query)))
|
||||
}
|
||||
|
||||
ctx, span := t.tracer.Start(ctx, spanName,
|
||||
trace.WithSpanKind(trace.SpanKindClient),
|
||||
trace.WithAttributes(attrs...))
|
||||
err := fn(ctx, span)
|
||||
span.End()
|
||||
|
||||
if query != "" {
|
||||
t.queryHistogram.Record(ctx, time.Since(startTime).Milliseconds(), t.attrs...)
|
||||
}
|
||||
|
||||
if !span.IsRecording() {
|
||||
return err
|
||||
}
|
||||
|
||||
switch err {
|
||||
case nil,
|
||||
driver.ErrSkip,
|
||||
io.EOF, // end of rows iterator
|
||||
sql.ErrNoRows:
|
||||
// ignore
|
||||
default:
|
||||
span.RecordError(err)
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type Option func(c *config)
|
||||
|
||||
// WithTracerProvider configures a tracer provider that is used to create a tracer.
|
||||
func WithTracerProvider(tracerProvider trace.TracerProvider) Option {
|
||||
return func(c *config) {
|
||||
c.tracerProvider = tracerProvider
|
||||
}
|
||||
}
|
||||
|
||||
// WithAttributes configures attributes that are used to create a span.
|
||||
func WithAttributes(attrs ...attribute.KeyValue) Option {
|
||||
return func(c *config) {
|
||||
c.attrs = append(c.attrs, attrs...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithDBSystem configures a db.system attribute. You should prefer using
|
||||
// WithAttributes and semconv, for example, `otelsql.WithAttributes(semconv.DBSystemSqlite)`.
|
||||
func WithDBSystem(system string) Option {
|
||||
return func(c *config) {
|
||||
c.attrs = append(c.attrs, semconv.DBSystemKey.String(system))
|
||||
}
|
||||
}
|
||||
|
||||
// WithDBName configures a db.name attribute.
|
||||
func WithDBName(name string) Option {
|
||||
return func(c *config) {
|
||||
c.attrs = append(c.attrs, semconv.DBNameKey.String(name))
|
||||
}
|
||||
}
|
||||
|
||||
// WithMeterProvider configures a metric.Meter used to create instruments.
|
||||
func WithMeterProvider(meterProvider metric.MeterProvider) Option {
|
||||
return func(c *config) {
|
||||
c.meterProvider = meterProvider
|
||||
}
|
||||
}
|
||||
|
||||
// WithQueryFormatter configures a query formatter
|
||||
func WithQueryFormatter(queryFormatter func(query string) string) Option {
|
||||
return func(c *config) {
|
||||
c.queryFormatter = queryFormatter
|
||||
}
|
||||
}
|
||||
|
||||
// ReportDBStatsMetrics reports DBStats metrics using OpenTelemetry Metrics API.
|
||||
func ReportDBStatsMetrics(db *sql.DB, opts ...Option) {
|
||||
cfg := newConfig(opts)
|
||||
|
||||
if cfg.meter == nil {
|
||||
cfg.meter = cfg.meterProvider.Meter(instrumName)
|
||||
}
|
||||
|
||||
meter := cfg.meter
|
||||
labels := cfg.attrs
|
||||
|
||||
maxOpenConns, _ := meter.Int64ObservableGauge(
|
||||
"go.sql.connections_max_open",
|
||||
instrument.WithDescription("Maximum number of open connections to the database"),
|
||||
)
|
||||
openConns, _ := meter.Int64ObservableGauge(
|
||||
"go.sql.connections_open",
|
||||
instrument.WithDescription("The number of established connections both in use and idle"),
|
||||
)
|
||||
inUseConns, _ := meter.Int64ObservableGauge(
|
||||
"go.sql.connections_in_use",
|
||||
instrument.WithDescription("The number of connections currently in use"),
|
||||
)
|
||||
idleConns, _ := meter.Int64ObservableGauge(
|
||||
"go.sql.connections_idle",
|
||||
instrument.WithDescription("The number of idle connections"),
|
||||
)
|
||||
connsWaitCount, _ := meter.Int64ObservableCounter(
|
||||
"go.sql.connections_wait_count",
|
||||
instrument.WithDescription("The total number of connections waited for"),
|
||||
)
|
||||
connsWaitDuration, _ := meter.Int64ObservableCounter(
|
||||
"go.sql.connections_wait_duration",
|
||||
instrument.WithDescription("The total time blocked waiting for a new connection"),
|
||||
instrument.WithUnit("nanoseconds"),
|
||||
)
|
||||
connsClosedMaxIdle, _ := meter.Int64ObservableCounter(
|
||||
"go.sql.connections_closed_max_idle",
|
||||
instrument.WithDescription("The total number of connections closed due to SetMaxIdleConns"),
|
||||
)
|
||||
connsClosedMaxIdleTime, _ := meter.Int64ObservableCounter(
|
||||
"go.sql.connections_closed_max_idle_time",
|
||||
instrument.WithDescription("The total number of connections closed due to SetConnMaxIdleTime"),
|
||||
)
|
||||
connsClosedMaxLifetime, _ := meter.Int64ObservableCounter(
|
||||
"go.sql.connections_closed_max_lifetime",
|
||||
instrument.WithDescription("The total number of connections closed due to SetConnMaxLifetime"),
|
||||
)
|
||||
|
||||
if _, err := meter.RegisterCallback(
|
||||
func(ctx context.Context, o metric.Observer) error {
|
||||
stats := db.Stats()
|
||||
|
||||
o.ObserveInt64(maxOpenConns, int64(stats.MaxOpenConnections), labels...)
|
||||
|
||||
o.ObserveInt64(openConns, int64(stats.OpenConnections), labels...)
|
||||
o.ObserveInt64(inUseConns, int64(stats.InUse), labels...)
|
||||
o.ObserveInt64(idleConns, int64(stats.Idle), labels...)
|
||||
|
||||
o.ObserveInt64(connsWaitCount, stats.WaitCount, labels...)
|
||||
o.ObserveInt64(connsWaitDuration, int64(stats.WaitDuration), labels...)
|
||||
o.ObserveInt64(connsClosedMaxIdle, stats.MaxIdleClosed, labels...)
|
||||
o.ObserveInt64(connsClosedMaxIdleTime, stats.MaxIdleTimeClosed, labels...)
|
||||
o.ObserveInt64(connsClosedMaxLifetime, stats.MaxLifetimeClosed, labels...)
|
||||
|
||||
return nil
|
||||
},
|
||||
maxOpenConns,
|
||||
openConns,
|
||||
inUseConns,
|
||||
idleConns,
|
||||
connsWaitCount,
|
||||
connsWaitDuration,
|
||||
connsClosedMaxIdle,
|
||||
connsClosedMaxIdleTime,
|
||||
connsClosedMaxLifetime,
|
||||
); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
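`WithQueryFormatter` above is the hook for trimming or scrubbing statements before they are attached as the `db.statement` span attribute. A hedged usage sketch (the DSN and the 512-character cutoff are arbitrary choices for illustration):

```go
import (
	"github.com/uptrace/opentelemetry-go-extra/otelsql"
	semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)

db, err := otelsql.Open("postgres", "dbname=fun user=abc",
	otelsql.WithAttributes(semconv.DBSystemPostgreSQL),
	// Truncate very long statements before they become span attributes.
	otelsql.WithQueryFormatter(func(query string) string {
		const max = 512 // arbitrary cutoff for this sketch
		if len(query) > max {
			return query[:max]
		}
		return query
	}))
if err != nil {
	panic(err)
}
```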
120 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/stmt.go generated vendored Normal file
@@ -0,0 +1,120 @@
package otelsql

import (
	"context"
	"database/sql/driver"

	"go.opentelemetry.io/otel/trace"
)

type otelStmt struct {
	driver.Stmt

	query   string
	instrum *dbInstrum

	execCtx  stmtExecCtxFunc
	queryCtx stmtQueryCtxFunc
}

var _ driver.Stmt = (*otelStmt)(nil)

func newStmt(stmt driver.Stmt, query string, instrum *dbInstrum) *otelStmt {
	s := &otelStmt{
		Stmt:    stmt,
		query:   query,
		instrum: instrum,
	}
	s.execCtx = s.createExecCtxFunc(stmt)
	s.queryCtx = s.createQueryCtxFunc(stmt)
	return s
}

//------------------------------------------------------------------------------

var _ driver.StmtExecContext = (*otelStmt)(nil)

func (stmt *otelStmt) ExecContext(
	ctx context.Context, args []driver.NamedValue,
) (driver.Result, error) {
	return stmt.execCtx(ctx, args)
}

type stmtExecCtxFunc func(ctx context.Context, args []driver.NamedValue) (driver.Result, error)

func (s *otelStmt) createExecCtxFunc(stmt driver.Stmt) stmtExecCtxFunc {
	var fn stmtExecCtxFunc

	if execer, ok := s.Stmt.(driver.StmtExecContext); ok {
		fn = execer.ExecContext
	} else {
		fn = func(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
			vArgs, err := namedValueToValue(args)
			if err != nil {
				return nil, err
			}
			return stmt.Exec(vArgs)
		}
	}

	return func(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
		var res driver.Result
		err := s.instrum.withSpan(ctx, "stmt.Exec", s.query,
			func(ctx context.Context, span trace.Span) error {
				var err error
				res, err = fn(ctx, args)
				if err != nil {
					return err
				}

				if span.IsRecording() {
					rows, err := res.RowsAffected()
					if err == nil {
						span.SetAttributes(dbRowsAffected.Int64(rows))
					}
				}

				return nil
			})
		return res, err
	}
}

//------------------------------------------------------------------------------

var _ driver.StmtQueryContext = (*otelStmt)(nil)

func (stmt *otelStmt) QueryContext(
	ctx context.Context, args []driver.NamedValue,
) (driver.Rows, error) {
	return stmt.queryCtx(ctx, args)
}

type stmtQueryCtxFunc func(ctx context.Context, args []driver.NamedValue) (driver.Rows, error)

func (s *otelStmt) createQueryCtxFunc(stmt driver.Stmt) stmtQueryCtxFunc {
	var fn stmtQueryCtxFunc

	if queryer, ok := s.Stmt.(driver.StmtQueryContext); ok {
		fn = queryer.QueryContext
	} else {
		fn = func(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
			vArgs, err := namedValueToValue(args)
			if err != nil {
				return nil, err
			}
			return s.Query(vArgs)
		}
	}

	return func(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
		var rows driver.Rows
		err := s.instrum.withSpan(ctx, "stmt.Query", s.query,
			func(ctx context.Context, span trace.Span) error {
				var err error
				rows, err = fn(ctx, args)
				return err
			})
		return rows, err
	}
}
38 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/tx.go generated vendored Normal file
@@ -0,0 +1,38 @@
package otelsql

import (
	"context"
	"database/sql/driver"

	"go.opentelemetry.io/otel/trace"
)

type otelTx struct {
	ctx     context.Context
	tx      driver.Tx
	instrum *dbInstrum
}

var _ driver.Tx = (*otelTx)(nil)

func newTx(ctx context.Context, tx driver.Tx, instrum *dbInstrum) *otelTx {
	return &otelTx{
		ctx:     ctx,
		tx:      tx,
		instrum: instrum,
	}
}

func (tx *otelTx) Commit() error {
	return tx.instrum.withSpan(tx.ctx, "tx.Commit", "",
		func(ctx context.Context, span trace.Span) error {
			return tx.tx.Commit()
		})
}

func (tx *otelTx) Rollback() error {
	return tx.instrum.withSpan(tx.ctx, "tx.Rollback", "",
		func(ctx context.Context, span trace.Span) error {
			return tx.tx.Rollback()
		})
}
6 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/version.go generated vendored Normal file
@@ -0,0 +1,6 @@
package otelsql

// Version is the current release version.
func Version() string {
	return "0.1.21"
}
201 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/LICENSE generated vendored Normal file
@@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
21 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/doc.go generated vendored Normal file
@@ -0,0 +1,21 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package otelgin instruments the github.com/gin-gonic/gin package.
//
// Currently there are two ways the code can be instrumented. One is
// instrumenting the routing of a received message (the Middleware function)
// and instrumenting the response generation through template evaluation (the
// HTML function).
package otelgin // import "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
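The package comment names the two entry points, Middleware and HTML. A minimal, hedged sketch of wiring the Middleware into a gin router (the service name and route are made up for illustration):

```go
import (
	"net/http"

	"github.com/gin-gonic/gin"
	"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
)

func newRouter() *gin.Engine {
	r := gin.New()
	// Every matched route gets a server span named after the route pattern.
	r.Use(otelgin.Middleware("my-server"))
	r.GET("/users/:id", func(c *gin.Context) {
		c.String(http.StatusOK, "user %s", c.Param("id"))
	})
	return r
}
```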
142 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/gintrace.go generated vendored Normal file
@@ -0,0 +1,142 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Based on https://github.com/DataDog/dd-trace-go/blob/8fb554ff7cf694267f9077ae35e27ce4689ed8b6/contrib/gin-gonic/gin/gintrace.go

package otelgin // import "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"

import (
	"fmt"

	"github.com/gin-gonic/gin"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/propagation"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
	oteltrace "go.opentelemetry.io/otel/trace"
)

const (
	tracerKey  = "otel-go-contrib-tracer"
	tracerName = "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
)

// Middleware returns middleware that will trace incoming requests.
// The service parameter should describe the name of the (virtual)
// server handling the request.
func Middleware(service string, opts ...Option) gin.HandlerFunc {
	cfg := config{}
	for _, opt := range opts {
		opt.apply(&cfg)
	}
	if cfg.TracerProvider == nil {
		cfg.TracerProvider = otel.GetTracerProvider()
	}
	tracer := cfg.TracerProvider.Tracer(
		tracerName,
		oteltrace.WithInstrumentationVersion(SemVersion()),
	)
	if cfg.Propagators == nil {
		cfg.Propagators = otel.GetTextMapPropagator()
	}
	return func(c *gin.Context) {
		for _, f := range cfg.Filters {
			if !f(c.Request) {
				// Serve the request to the next middleware
				// if a filter rejects the request.
				c.Next()
				return
			}
		}
		c.Set(tracerKey, tracer)
		savedCtx := c.Request.Context()
		defer func() {
			c.Request = c.Request.WithContext(savedCtx)
		}()
		ctx := cfg.Propagators.Extract(savedCtx, propagation.HeaderCarrier(c.Request.Header))
		opts := []oteltrace.SpanStartOption{
			oteltrace.WithAttributes(httpconv.ServerRequest(service, c.Request)...),
			oteltrace.WithSpanKind(oteltrace.SpanKindServer),
		}
		var spanName string
		if cfg.SpanNameFormatter == nil {
			spanName = c.FullPath()
		} else {
			spanName = cfg.SpanNameFormatter(c.Request)
		}
		if spanName == "" {
			spanName = fmt.Sprintf("HTTP %s route not found", c.Request.Method)
		} else {
			rAttr := semconv.HTTPRoute(spanName)
			opts = append(opts, oteltrace.WithAttributes(rAttr))
		}
		ctx, span := tracer.Start(ctx, spanName, opts...)
		defer span.End()

		// pass the span through the request context
		c.Request = c.Request.WithContext(ctx)

		// serve the request to the next middleware
		c.Next()

		status := c.Writer.Status()
		span.SetStatus(httpconv.ServerStatus(status))
		if status > 0 {
			span.SetAttributes(semconv.HTTPStatusCode(status))
		}
		if len(c.Errors) > 0 {
			span.SetAttributes(attribute.String("gin.errors", c.Errors.String()))
		}
	}
}

// HTML will trace the rendering of the template as a child of the
// span in the given context. This is a replacement for
// gin.Context.HTML function - it invokes the original function after
// setting up the span.
func HTML(c *gin.Context, code int, name string, obj interface{}) {
	var tracer oteltrace.Tracer
	tracerInterface, ok := c.Get(tracerKey)
	if ok {
		tracer, ok = tracerInterface.(oteltrace.Tracer)
	}
	if !ok {
		tracer = otel.GetTracerProvider().Tracer(
			tracerName,
			oteltrace.WithInstrumentationVersion(SemVersion()),
		)
	}
	savedContext := c.Request.Context()
	defer func() {
		c.Request = c.Request.WithContext(savedContext)
	}()
	opt := oteltrace.WithAttributes(attribute.String("go.template", name))
	_, span := tracer.Start(savedContext, "gin.renderer.html", opt)
	defer func() {
		if r := recover(); r != nil {
			err := fmt.Errorf("error rendering template:%s: %s", name, r)
			span.RecordError(err)
			span.SetStatus(codes.Error, "template failure")
			span.End()
			panic(r)
		} else {
			span.End()
		}
	}()
	c.HTML(code, name, obj)
}
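`HTML` above replaces `gin.Context.HTML` so template rendering shows up as a child span of the request span. A hedged usage sketch (the template path, name, and data are hypothetical):

```go
import (
	"net/http"

	"github.com/gin-gonic/gin"
	"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
)

func registerRoutes(r *gin.Engine) {
	r.Use(otelgin.Middleware("my-server"))
	r.LoadHTMLGlob("templates/*.html") // hypothetical template location

	r.GET("/", func(c *gin.Context) {
		// Renders index.html and records a "gin.renderer.html" child span.
		otelgin.HTML(c, http.StatusOK, "index.html", gin.H{"title": "hello"})
	})
}
```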
90 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/option.go generated vendored Normal file
@@ -0,0 +1,90 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Based on https://github.com/DataDog/dd-trace-go/blob/8fb554ff7cf694267f9077ae35e27ce4689ed8b6/contrib/gin-gonic/gin/option.go

package otelgin // import "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"

import (
	"net/http"

	"go.opentelemetry.io/otel/propagation"
	oteltrace "go.opentelemetry.io/otel/trace"
)

type config struct {
	TracerProvider    oteltrace.TracerProvider
	Propagators       propagation.TextMapPropagator
	Filters           []Filter
	SpanNameFormatter SpanNameFormatter
}

// Filter is a predicate used to determine whether a given http.request should
// be traced. A Filter must return true if the request should be traced.
type Filter func(*http.Request) bool

// SpanNameFormatter is used to set span name by http.request.
type SpanNameFormatter func(r *http.Request) string

// Option specifies instrumentation configuration options.
type Option interface {
	apply(*config)
}

type optionFunc func(*config)

func (o optionFunc) apply(c *config) {
	o(c)
}

// WithPropagators specifies propagators to use for extracting
// information from the HTTP requests. If none are specified, global
// ones will be used.
func WithPropagators(propagators propagation.TextMapPropagator) Option {
	return optionFunc(func(cfg *config) {
		if propagators != nil {
			cfg.Propagators = propagators
		}
	})
}

// WithTracerProvider specifies a tracer provider to use for creating a tracer.
// If none is specified, the global provider is used.
func WithTracerProvider(provider oteltrace.TracerProvider) Option {
	return optionFunc(func(cfg *config) {
		if provider != nil {
			cfg.TracerProvider = provider
		}
	})
}

// WithFilter adds a filter to the list of filters used by the handler.
// If any filter indicates to exclude a request then the request will not be
// traced. All filters must allow a request to be traced for a Span to be created.
// If no filters are provided then all requests are traced.
// Filters will be invoked for each processed request, it is advised to make them
// simple and fast.
func WithFilter(f ...Filter) Option {
	return optionFunc(func(c *config) {
		c.Filters = append(c.Filters, f...)
	})
}

// WithSpanNameFormatter takes a function that will be called on every
// request and the returned string will become the Span Name.
func WithSpanNameFormatter(f func(r *http.Request) string) Option {
	return optionFunc(func(c *config) {
		c.SpanNameFormatter = f
	})
}
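The options above compose. A hedged sketch combining `WithFilter` and `WithSpanNameFormatter` (the health-check path is an assumption, and raw request paths can be high-cardinality span names; this only illustrates the hooks):

```go
import (
	"net/http"

	"github.com/gin-gonic/gin"
	"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
)

func instrument(r *gin.Engine) {
	r.Use(otelgin.Middleware("my-server",
		// Drop spans for health checks entirely.
		otelgin.WithFilter(func(req *http.Request) bool {
			return req.URL.Path != "/healthz" // assumed health-check path
		}),
		// Name spans "<METHOD> <path>" instead of the bare route pattern.
		otelgin.WithSpanNameFormatter(func(req *http.Request) string {
			return req.Method + " " + req.URL.Path
		}),
	))
}
```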
26 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/version.go generated vendored Normal file
@@ -0,0 +1,26 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otelgin // import "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"

// Version is the current release version of the gin instrumentation.
func Version() string {
	return "0.40.0"
	// This string is updated by the pre_release.sh script during release
}

// SemVersion is the semantic version to be supplied to tracer/meter creation.
func SemVersion() string {
	return "semver:" + Version()
}
3 vendor/go.opentelemetry.io/otel/.gitattributes generated vendored Normal file
@@ -0,0 +1,3 @@
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf
21 vendor/go.opentelemetry.io/otel/.gitignore generated vendored Normal file
@@ -0,0 +1,21 @@
.DS_Store
Thumbs.db

.tools/
.idea/
.vscode/
*.iml
*.so
coverage.*

gen/

/example/fib/fib
/example/fib/traces.txt
/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/zipkin/zipkin
/example/otel-collector/otel-collector
3 vendor/go.opentelemetry.io/otel/.gitmodules generated vendored Normal file
@@ -0,0 +1,3 @@
[submodule "opentelemetry-proto"]
	path = exporters/otlp/internal/opentelemetry-proto
	url = https://github.com/open-telemetry/opentelemetry-proto
244 vendor/go.opentelemetry.io/otel/.golangci.yml generated vendored Normal file
@@ -0,0 +1,244 @@
|
|||
# See https://github.com/golangci/golangci-lint#config-file
|
||||
run:
|
||||
issues-exit-code: 1 #Default
|
||||
tests: true #Default
|
||||
|
||||
linters:
|
||||
# Disable everything by default so upgrades to not include new "default
|
||||
# enabled" linters.
|
||||
disable-all: true
|
||||
# Specifically enable linters we want to use.
|
||||
enable:
|
||||
- depguard
|
||||
- errcheck
|
||||
- godot
|
||||
- gofmt
|
||||
- goimports
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- revive
|
||||
- staticcheck
|
||||
- typecheck
|
||||
- unused
|
||||
|
||||
issues:
|
||||
# Maximum issues count per one linter.
|
||||
# Set to 0 to disable.
|
||||
# Default: 50
|
||||
# Setting to unlimited so the linter only is run once to debug all issues.
|
||||
max-issues-per-linter: 0
|
||||
# Maximum count of issues with the same text.
|
||||
# Set to 0 to disable.
|
||||
# Default: 3
|
||||
# Setting to unlimited so the linter only is run once to debug all issues.
|
||||
max-same-issues: 0
|
||||
# Excluding configuration per-path, per-linter, per-text and per-source.
|
||||
exclude-rules:
|
||||
# TODO: Having appropriate comments for exported objects helps development,
|
||||
# even for objects in internal packages. Appropriate comments for all
|
||||
# exported objects should be added and this exclusion removed.
|
||||
- path: '.*internal/.*'
|
||||
text: "exported (method|function|type|const) (.+) should have comment or be unexported"
|
||||
linters:
|
||||
- revive
|
||||
# Yes, they are, but it's okay in a test.
|
||||
- path: _test\.go
|
||||
text: "exported func.*returns unexported type.*which can be annoying to use"
|
||||
linters:
|
||||
- revive
|
||||
# Example test functions should be treated like main.
|
||||
- path: example.*_test\.go
|
||||
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
|
||||
linters:
|
||||
- revive
|
||||
include:
|
||||
# revive exported should have comment or be unexported.
|
||||
- EXC0012
|
||||
# revive package comment should be of the form ...
|
||||
- EXC0013
|
||||
|
||||
linters-settings:
|
||||
depguard:
|
||||
# Check the list against standard lib.
|
||||
# Default: false
|
||||
include-go-root: true
|
||||
# A list of packages for the list type specified.
|
||||
# Default: []
|
||||
packages:
|
||||
- "crypto/md5"
|
||||
- "crypto/sha1"
|
||||
- "crypto/**/pkix"
|
||||
ignore-file-rules:
|
||||
- "**/*_test.go"
|
||||
additional-guards:
|
||||
# Do not allow testing packages in non-test files.
|
||||
- list-type: denylist
|
||||
include-go-root: true
|
||||
packages:
|
||||
- testing
|
||||
- github.com/stretchr/testify
|
||||
ignore-file-rules:
|
||||
- "**/*_test.go"
|
||||
- "**/*test/*.go"
|
||||
- "**/internal/matchers/*.go"
|
||||
godot:
|
||||
exclude:
|
||||
# Exclude sentence fragments for lists.
|
||||
- '^[ ]*[-•]'
|
||||
# Exclude sentences prefixing a list.
|
||||
- ':$'
|
||||
goimports:
|
||||
local-prefixes: go.opentelemetry.io
|
||||
misspell:
|
||||
locale: US
|
||||
ignore-words:
|
||||
- cancelled
|
||||
revive:
|
||||
# Sets the default failure confidence.
|
||||
# This means that linting errors with less than 0.8 confidence will be ignored.
|
||||
# Default: 0.8
|
||||
confidence: 0.01
|
||||
rules:
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
|
||||
- name: blank-imports
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
|
||||
- name: bool-literal-in-expr
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
|
||||
- name: constant-logical-expr
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
|
||||
# TODO (#3372) reenable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
|
||||
- name: context-as-argument
|
||||
disabled: true
|
||||
arguments:
|
||||
allowTypesBefore: "*testing.T"
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
|
||||
- name: context-keys-type
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
|
||||
- name: deep-exit
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
|
||||
- name: defer
|
||||
disabled: false
|
||||
arguments:
|
||||
- ["call-chain", "loop"]
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
|
||||
- name: dot-imports
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
|
||||
- name: duplicated-imports
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
|
||||
- name: early-return
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
|
||||
- name: empty-block
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
|
||||
- name: empty-lines
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
|
||||
- name: error-naming
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
|
||||
- name: error-return
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
|
||||
- name: error-strings
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
|
||||
- name: errorf
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
|
||||
- name: exported
|
||||
disabled: false
|
||||
arguments:
|
||||
- "sayRepetitiveInsteadOfStutters"
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter
|
||||
- name: flag-parameter
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches
|
||||
- name: identical-branches
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
|
||||
- name: if-return
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement
|
||||
- name: increment-decrement
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow
|
||||
- name: indent-error-flow
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
|
||||
- name: import-shadowing
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments
|
||||
- name: package-comments
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range
|
||||
- name: range
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure
|
||||
- name: range-val-in-closure
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address
|
||||
- name: range-val-address
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id
|
||||
- name: redefines-builtin-id
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
|
||||
- name: string-format
|
||||
disabled: false
|
||||
arguments:
|
||||
- - panic
|
||||
- '/^[^\n]*$/'
|
||||
- must not contain line breaks
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
|
||||
- name: struct-tag
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else
|
||||
- name: superfluous-else
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal
|
||||
- name: time-equal
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming
|
||||
- name: var-naming
|
||||
disabled: false
|
||||
arguments:
|
||||
- ["ID"] # AllowList
|
||||
- ["Otel", "Aws", "Gcp"] # DenyList
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration
|
||||
- name: var-declaration
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion
|
||||
- name: unconditional-recursion
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return
|
||||
- name: unexported-return
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
|
||||
- name: unhandled-error
|
||||
disabled: false
|
||||
arguments:
|
||||
- "fmt.Fprint"
|
||||
- "fmt.Fprintf"
|
||||
- "fmt.Fprintln"
|
||||
- "fmt.Print"
|
||||
- "fmt.Printf"
|
||||
- "fmt.Println"
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt
|
||||
- name: unnecessary-stmt
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
|
||||
- name: useless-break
|
||||
disabled: false
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
|
||||
- name: waitgroup-by-value
|
||||
disabled: false
|
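For readers unfamiliar with the revive rules configured above, here is a small, purely illustrative Go snippet (not part of the vendored module; all names are hypothetical) written in the style those rules accept, in particular var-naming's "ID" allowlist, error-naming, error-strings, and errorf:

package lintexample

import (
	"errors"
	"fmt"
)

// userID keeps the "ID" initialism fully upper-cased, matching the var-naming allowlist.
type userID string

// errNotFound follows error-naming: error variables carry an "err" prefix.
var errNotFound = errors.New("user not found")

// lookup follows error-strings and errorf: the message starts lower-case,
// has no trailing punctuation, and wrapping goes through fmt.Errorf with %w.
func lookup(id userID) error {
	return fmt.Errorf("looking up user %q: %w", id, errNotFound)
}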
6 vendor/go.opentelemetry.io/otel/.lycheeignore generated vendored Normal file
@@ -0,0 +1,6 @@
http://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
29 vendor/go.opentelemetry.io/otel/.markdownlint.yaml generated vendored Normal file
@@ -0,0 +1,29 @@
# Default state for all rules
default: true

# ul-style
MD004: false

# hard-tabs
MD010: false

# line-length
MD013: false

# no-duplicate-header
MD024:
  siblings_only: true

#single-title
MD025: false

# ol-prefix
MD029:
  style: ordered

# no-inline-html
MD033: false

# fenced-code-language
MD040: false
2369 vendor/go.opentelemetry.io/otel/CHANGELOG.md generated vendored Normal file
File diff suppressed because it is too large.
17 vendor/go.opentelemetry.io/otel/CODEOWNERS generated vendored Normal file
@@ -0,0 +1,17 @@
#####################################################
#
# List of approvers for this repository
#
#####################################################
#
# Learn about membership in OpenTelemetry community:
# https://github.com/open-telemetry/community/blob/main/community-membership.md
#
#
# Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners
#

* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu

CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod
Some files were not shown because too many files have changed in this diff.