diff --git a/go.mod b/go.mod
index ce0443128..eb81a1f28 100644
--- a/go.mod
+++ b/go.mod
@@ -5,23 +5,23 @@ go 1.24.6
 require (
 	github.com/envoyproxy/go-control-plane/envoy v1.32.4
 	github.com/golang/protobuf v1.5.4
-	github.com/open-policy-agent/opa v1.8.0
+	github.com/open-policy-agent/opa v1.9.0
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.23.2
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0
-	go.opentelemetry.io/otel v1.37.0
-	go.opentelemetry.io/otel/sdk v1.37.0
-	go.opentelemetry.io/otel/trace v1.37.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
+	go.opentelemetry.io/otel v1.38.0
+	go.opentelemetry.io/otel/sdk v1.38.0
+	go.opentelemetry.io/otel/trace v1.38.0
 	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
 	golang.org/x/tools v0.37.0
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5
 	google.golang.org/grpc v1.75.1
 	google.golang.org/protobuf v1.36.9
 )

 require (
-	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.3 // indirect
 	github.com/containerd/containerd/v2 v2.1.4 // indirect
 	github.com/containerd/errdefs v1.0.0 // indirect
 	github.com/containerd/platforms v1.0.0-rc.1 // indirect
@@ -30,30 +30,36 @@ require (
 	github.com/dgraph-io/badger/v4 v4.8.0 // indirect
 	github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
 	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
-	github.com/goccy/go-json v0.10.3 // indirect
+	github.com/goccy/go-json v0.10.5 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+	github.com/huandu/go-clone v1.7.3 // indirect
+	github.com/huandu/go-sqlbuilder v1.37.0 // indirect
+	github.com/huandu/xstrings v1.4.0 // indirect
 	github.com/lestrrat-go/blackmagic v1.0.4 // indirect
+	github.com/lestrrat-go/dsig v1.0.0 // indirect
+	github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
 	github.com/lestrrat-go/httpcc v1.0.1 // indirect
-	github.com/lestrrat-go/httprc/v3 v3.0.0 // indirect
-	github.com/lestrrat-go/jwx/v3 v3.0.10 // indirect
+	github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect
+	github.com/lestrrat-go/jwx/v3 v3.0.11 // indirect
 	github.com/lestrrat-go/option v1.0.1 // indirect
 	github.com/lestrrat-go/option/v2 v2.0.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
-	github.com/sagikazarmark/locafero v0.7.0 // indirect
+	github.com/sagikazarmark/locafero v0.11.0 // indirect
 	github.com/segmentio/asm v1.2.0 // indirect
-	github.com/sourcegraph/conc v0.3.0 // indirect
-	github.com/spf13/afero v1.12.0 // indirect
-	github.com/spf13/cast v1.7.1 // indirect
-	github.com/spf13/viper v1.20.1 // indirect
+	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
+	github.com/spf13/afero v1.15.0 // indirect
+	github.com/spf13/cast v1.10.0 // indirect
+	github.com/spf13/viper v1.21.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
 	github.com/valyala/fastjson v1.6.4 // indirect
 	github.com/vektah/gqlparser/v2 v2.5.30 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect
-	go.uber.org/multierr v1.11.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
 	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/crypto v0.42.0 // indirect
 	golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
@@ -76,7 +82,7 @@ require (
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/google/flatbuffers v25.2.10+incompatible // indirect
 	github.com/google/uuid v1.6.0
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
@@ -87,30 +93,30 @@ require (
 	github.com/peterh/liner v1.2.2 // indirect
 	github.com/prometheus/client_model v0.6.2 // indirect
 	github.com/prometheus/common v0.66.1 // indirect
-	github.com/prometheus/procfs v0.16.1 // indirect
-	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
+	github.com/prometheus/procfs v0.17.0 // indirect
+	github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/sergi/go-diff v1.4.0 // indirect
-	github.com/sirupsen/logrus v1.9.3 // indirect
-	github.com/spf13/cobra v1.9.1 // indirect
-	github.com/spf13/pflag v1.0.7 // indirect
+	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
+	github.com/spf13/cobra v1.10.1 // indirect
+	github.com/spf13/pflag v1.0.10 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.3 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/yashtewari/glob-intersection v0.2.0 // indirect
 	go.opentelemetry.io/contrib/propagators/b3 v1.37.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
-	go.opentelemetry.io/otel/metric v1.37.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.7.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
+	go.opentelemetry.io/otel/metric v1.38.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.7.1 // indirect
 	go.uber.org/automaxprocs v1.6.0 // indirect
 	golang.org/x/mod v0.28.0 // indirect
 	golang.org/x/net v0.44.0 // indirect
 	golang.org/x/sync v0.17.0 // indirect
 	golang.org/x/sys v0.36.0 // indirect
 	golang.org/x/text v0.29.0 // indirect
-	golang.org/x/time v0.12.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect
+	golang.org/x/time v0.13.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
 	oras.land/oras-go/v2 v2.6.0 // indirect
 	sigs.k8s.io/yaml v1.6.0 // indirect
 )
diff --git a/go.sum b/go.sum
index d95b3d05b..e9703c537 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
 github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
-github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
-github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
@@ -28,8 +28,9 @@ github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++
 github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
 github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
@@ -67,8 +68,8 @@ github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9L
 github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
-github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
+github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
+github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
@@ -79,8 +80,19 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U=
+github.com/huandu/go-assert v1.1.6 h1:oaAfYxq9KNDi9qswn/6aE0EydfxSa+tWZC1KabNitYs=
+github.com/huandu/go-assert v1.1.6/go.mod h1:JuIfbmYG9ykwvuxoJ3V8TB5QP+3+ajIA54Y44TmkMxs=
+github.com/huandu/go-clone v1.7.3 h1:rtQODA+ABThEn6J5LBTppJfKmZy/FwfpMUWa8d01TTQ=
+github.com/huandu/go-clone v1.7.3/go.mod h1:ReGivhG6op3GYr+UY3lS6mxjKp7MIGTknuU5TbTVaXE=
+github.com/huandu/go-sqlbuilder v1.37.0 h1:hXgk2rTnlgFgKsmFpizhe6g/oz1wxef4qk3ixFhK6a0=
+github.com/huandu/go-sqlbuilder v1.37.0/go.mod h1:zdONH67liL+/TvoUMwnZP/sUYGSSvHh9psLe/HpXn8E=
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -98,12 +110,16 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA=
 github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw=
+github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38=
+github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo=
+github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY=
+github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU=
 github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
 github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
-github.com/lestrrat-go/httprc/v3 v3.0.0 h1:nZUx/zFg5uc2rhlu1L1DidGr5Sj02JbXvGSpnY4LMrc=
-github.com/lestrrat-go/httprc/v3 v3.0.0/go.mod h1:k2U1QIiyVqAKtkffbg+cUmsyiPGQsb9aAfNQiNFuQ9Q=
-github.com/lestrrat-go/jwx/v3 v3.0.10 h1:XuoCBhZBncRIjMQ32HdEc76rH0xK/Qv2wq5TBouYJDw=
-github.com/lestrrat-go/jwx/v3 v3.0.10/go.mod h1:kNMedLgTpHvPJkK5EMVa1JFz+UVyY2dMmZKu3qjl/Pk=
+github.com/lestrrat-go/httprc/v3 v3.0.1 h1:3n7Es68YYGZb2Jf+k//llA4FTZMl3yCwIjFIk4ubevI=
+github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk=
+github.com/lestrrat-go/jwx/v3 v3.0.11 h1:yEeUGNUuNjcez/Voxvr7XPTYNraSQTENJgtVTfwvG/w=
+github.com/lestrrat-go/jwx/v3 v3.0.11/go.mod h1:XSOAh2SiXm0QgRe3DulLZLyt+wUuEdFo81zuKTLcvgQ=
 github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
 github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
 github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss=
@@ -120,8 +136,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
 github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/open-policy-agent/opa v1.8.0 h1:4JdYuZcANeUF1v/87NGpirocpaZzJA0PcuL7xfmsMNM=
-github.com/open-policy-agent/opa v1.8.0/go.mod h1:vOVZuIJQISnaYcZtQ58yTDkVCp1FmGPwK43pO9qPDqM=
+github.com/open-policy-agent/opa v1.9.0 h1:QWFNwbcc29IRy0xwD3hRrMc/RtSersLY1Z6TaID3vgI=
+github.com/open-policy-agent/opa v1.9.0/go.mod h1:72+lKmTda0O48m1VKAxxYl7MjP/EWFZu9fxHQK2xihs=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -134,8 +150,9 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
 github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
 github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
@@ -144,37 +161,37 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
 github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
 github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
 github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
-github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
-github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
-github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
-github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg=
+github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
-github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
+github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
+github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
 github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
 github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
 github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
-github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
-github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
-github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
-github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
-github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
-github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
-github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
-github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
+github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
+github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
+github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
+github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
+github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
+github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
+github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
+github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -202,38 +219,36 @@ go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJyS
 go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 h1:rbRJ8BBoVMsQShESYZ0FkvcITu8X8QNwJogcLUmDNNw=
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0/go.mod h1:ru6KHrNtNHxM4nD/vd6QrLVWgKhxPYgblq4VAtNawTQ=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
 go.opentelemetry.io/contrib/propagators/b3 v1.37.0 h1:0aGKdIuVhy5l4GClAjl72ntkZJhijf2wg1S7b5oLoYA=
 go.opentelemetry.io/contrib/propagators/b3 v1.37.0/go.mod h1:nhyrxEJEOQdwR15zXrCKI6+cJK60PXAkJ/jRyfhr2mg=
-go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
-go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA=
-go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
-go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
-go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
-go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
-go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
-go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
-go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
-go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
-go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
-go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
+go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
+go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
+go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
 go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
 go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
-go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
 go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
-go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
-go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -270,8 +285,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
 golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
-golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
-golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
+golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -285,10 +300,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
 gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU=
-google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
 google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
 google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
 google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go
index c1f3e442d..79d425e87 100644
--- a/vendor/github.com/cenkalti/backoff/v5/exponential.go
+++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go
@@ -1,7 +1,7 @@
 package backoff

 import (
-	"math/rand"
+	"math/rand/v2"
 	"time"
 )

@@ -28,13 +28,7 @@ multiplied by the exponential, that is, between 2 and 6 seconds.

 Note: MaxInterval caps the RetryInterval and not the randomized interval.

-If the time elapsed since an ExponentialBackOff instance is created goes past the
-MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
-
-The elapsed time can be reset by calling Reset().
-
-Example: Given the following default arguments, for 10 tries the sequence will be,
-and assuming we go over the MaxElapsedTime on the 10th try:
+Example: Given the following default arguments, for 9 tries the sequence will be:

  Request #  RetryInterval (seconds)  Randomized Interval (seconds)

@@ -47,7 +41,6 @@ and assuming we go over the MaxElapsedTime on the 10th try:
  7          5.692                    [2.846, 8.538]
  8          8.538                    [4.269, 12.807]
  9          12.807                   [6.403, 19.210]
- 10         19.210                   backoff.Stop

 Note: Implementation is not thread-safe.
 */
diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go
index e43f47fb8..32a7f9883 100644
--- a/vendor/github.com/cenkalti/backoff/v5/retry.go
+++ b/vendor/github.com/cenkalti/backoff/v5/retry.go
@@ -47,7 +47,7 @@ func WithNotify(n Notify) RetryOption {
 	}
 }

-// WithMaxTries limits the number of retry attempts.
+// WithMaxTries limits the number of all attempts.
 func WithMaxTries(n uint) RetryOption {
 	return func(args *retryOptions) {
 		args.MaxTries = n
@@ -97,7 +97,7 @@ func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOpti
 	// Handle permanent errors without retrying.
 	var permanent *PermanentError
 	if errors.As(err, &permanent) {
-		return res, err
+		return res, permanent.Unwrap()
 	}

 	// Stop retrying if context is cancelled.
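The retry.go hunks above change two caller-visible behaviors in backoff/v5: WithMaxTries now counts every attempt rather than only retries, and an operation that fails with a PermanentError now yields the wrapped error itself instead of the *PermanentError wrapper. A minimal sketch of how the updated API behaves (the operation and sentinel error here are hypothetical, not part of the vendored code):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

// errNotFound is a hypothetical sentinel used only for this sketch.
var errNotFound = errors.New("not found")

func main() {
	op := func() (string, error) {
		// Wrapping with backoff.Permanent stops the retry loop immediately.
		return "", backoff.Permanent(errNotFound)
	}
	_, err := backoff.Retry(context.Background(), op,
		backoff.WithBackOff(backoff.NewExponentialBackOff()),
		backoff.WithMaxTries(4), // counts all attempts in v5.0.3, not just retries
		backoff.WithMaxElapsedTime(30*time.Second),
	)
	// With the change above, err is errNotFound itself rather than the
	// *PermanentError wrapper, so a plain equality check now works too.
	fmt.Println(err == errNotFound, errors.Is(err, errNotFound))
}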
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go
index fab643764..8ad50936c 100644
--- a/vendor/github.com/goccy/go-json/internal/decoder/compile.go
+++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"reflect"
 	"strings"
+	"sync"
 	"sync/atomic"
 	"unicode"
 	"unsafe"
@@ -17,22 +18,27 @@ var (
 	typeAddr         *runtime.TypeAddr
 	cachedDecoderMap unsafe.Pointer // map[uintptr]decoder
 	cachedDecoder    []Decoder
+	initOnce         sync.Once
 )

-func init() {
-	typeAddr = runtime.AnalyzeTypeAddr()
-	if typeAddr == nil {
-		typeAddr = &runtime.TypeAddr{}
-	}
-	cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+func initDecoder() {
+	initOnce.Do(func() {
+		typeAddr = runtime.AnalyzeTypeAddr()
+		if typeAddr == nil {
+			typeAddr = &runtime.TypeAddr{}
+		}
+		cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+	})
 }

 func loadDecoderMap() map[uintptr]Decoder {
+	initDecoder()
 	p := atomic.LoadPointer(&cachedDecoderMap)
 	return *(*map[uintptr]Decoder)(unsafe.Pointer(&p))
 }

 func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) {
+	initDecoder()
 	newDecoderMap := make(map[uintptr]Decoder, len(m)+1)
 	newDecoderMap[typ] = dec
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
index eb7e2b134..025ca85b5 100644
--- a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
+++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
@@ -10,6 +10,7 @@ import (
 )

 func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) {
+	initDecoder()
 	typeptr := uintptr(unsafe.Pointer(typ))
 	if typeptr > typeAddr.MaxTypeAddr {
 		return compileToGetDecoderSlowPath(typeptr, typ)
diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
index 49cdda4a1..023b817c3 100644
--- a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
+++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
@@ -13,6 +13,7 @@ import (
 var decMu sync.RWMutex

 func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) {
+	initDecoder()
 	typeptr := uintptr(unsafe.Pointer(typ))
 	if typeptr > typeAddr.MaxTypeAddr {
 		return compileToGetDecoderSlowPath(typeptr, typ)
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go
index 37b7aa38e..b10763689 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go
@@ -5,6 +5,7 @@ import (
 	"encoding"
 	"encoding/json"
 	"reflect"
+	"sync"
 	"sync/atomic"
 	"unsafe"

@@ -24,14 +25,17 @@ var (
 	cachedOpcodeSets []*OpcodeSet
 	cachedOpcodeMap  unsafe.Pointer // map[uintptr]*OpcodeSet
 	typeAddr         *runtime.TypeAddr
+	initEncoderOnce  sync.Once
 )

-func init() {
-	typeAddr = runtime.AnalyzeTypeAddr()
-	if typeAddr == nil {
-		typeAddr = &runtime.TypeAddr{}
-	}
-	cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+func initEncoder() {
+	initEncoderOnce.Do(func() {
+		typeAddr = runtime.AnalyzeTypeAddr()
+		if typeAddr == nil {
+			typeAddr = &runtime.TypeAddr{}
+		}
+		cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1)
+	})
 }

 func loadOpcodeMap() map[uintptr]*OpcodeSet {
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
index 20c93cbf7..b6f45a49b 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
@@ -4,6 +4,7 @@ package encoder

 func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) {
+	initEncoder()
 	if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr {
 		codeSet, err := compileToGetCodeSetSlowPath(typeptr)
 		if err != nil {
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
index 13ba23fdf..47b482f7f 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
@@ -10,6 +10,7 @@ import (
 var setsMu sync.RWMutex

 func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) {
+	initEncoder()
 	if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr {
 		codeSet, err := compileToGetCodeSetSlowPath(typeptr)
 		if err != nil {
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go
index 14eb6a0d6..b436f5b21 100644
--- a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go
+++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go
@@ -406,6 +406,11 @@ func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{
 			rv = newV
 		}
 	}
+
+	if rv.Kind() == reflect.Ptr && rv.IsNil() {
+		return AppendNull(ctx, b), nil
+	}
+
 	v = rv.Interface()
 	var bb []byte
 	if (code.Flags & MarshalerContextFlags) != 0 {
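The encoder.go hunk above adds a nil-pointer guard to AppendMarshalJSON, so a nil pointer whose type implements json.Marshaler is appended as JSON null instead of having MarshalJSON invoked on a nil receiver. A small sketch of the input shape this guard targets (the payload type is hypothetical, and it assumes this field shape reaches that code path):

package main

import (
	"fmt"

	json "github.com/goccy/go-json"
)

// payload is a hypothetical type with a pointer-receiver MarshalJSON.
type payload struct{ n int }

func (p *payload) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf("%d", p.n)), nil // would panic on a nil receiver
}

func main() {
	var p *payload // nil, but statically a json.Marshaler
	// With the guard above, the nil pointer is emitted as null, matching
	// encoding/json, instead of calling MarshalJSON on the nil receiver.
	b, err := json.Marshal(struct{ V *payload }{V: p})
	fmt.Println(string(b), err)
}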
diff --git a/vendor/github.com/goccy/go-json/internal/runtime/type.go b/vendor/github.com/goccy/go-json/internal/runtime/type.go
index 0167cd2c0..4b693cb0b 100644
--- a/vendor/github.com/goccy/go-json/internal/runtime/type.go
+++ b/vendor/github.com/goccy/go-json/internal/runtime/type.go
@@ -2,6 +2,7 @@ package runtime

 import (
 	"reflect"
+	"sync"
 	"unsafe"
 )

@@ -23,8 +24,8 @@ type TypeAddr struct {
 }

 var (
-	typeAddr        *TypeAddr
-	alreadyAnalyzed bool
+	typeAddr *TypeAddr
+	once     sync.Once
 )

 //go:linkname typelinks reflect.typelinks
@@ -34,67 +35,64 @@ func typelinks() ([]unsafe.Pointer, [][]int32)
 func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer

 func AnalyzeTypeAddr() *TypeAddr {
-	defer func() {
-		alreadyAnalyzed = true
-	}()
-	if alreadyAnalyzed {
-		return typeAddr
-	}
-	sections, offsets := typelinks()
-	if len(sections) != 1 {
-		return nil
-	}
-	if len(offsets) != 1 {
-		return nil
-	}
-	section := sections[0]
-	offset := offsets[0]
-	var (
-		min         uintptr = uintptr(^uint(0))
-		max         uintptr = 0
-		isAligned64         = true
-		isAligned32         = true
-	)
-	for i := 0; i < len(offset); i++ {
-		typ := (*Type)(rtypeOff(section, offset[i]))
-		addr := uintptr(unsafe.Pointer(typ))
-		if min > addr {
-			min = addr
+	once.Do(func() {
+		sections, offsets := typelinks()
+		if len(sections) != 1 {
+			return
 		}
-		if max < addr {
-			max = addr
+		if len(offsets) != 1 {
+			return
 		}
-		if typ.Kind() == reflect.Ptr {
-			addr = uintptr(unsafe.Pointer(typ.Elem()))
+		section := sections[0]
+		offset := offsets[0]
+		var (
+			min         uintptr = uintptr(^uint(0))
+			max         uintptr = 0
+			isAligned64         = true
+			isAligned32         = true
+		)
+		for i := 0; i < len(offset); i++ {
+			typ := (*Type)(rtypeOff(section, offset[i]))
+			addr := uintptr(unsafe.Pointer(typ))
 			if min > addr {
 				min = addr
 			}
 			if max < addr {
 				max = addr
 			}
+			if typ.Kind() == reflect.Ptr {
+				addr = uintptr(unsafe.Pointer(typ.Elem()))
+				if min > addr {
+					min = addr
+				}
+				if max < addr {
+					max = addr
+				}
+			}
+			isAligned64 = isAligned64 && (addr-min)&63 == 0
+			isAligned32 = isAligned32 && (addr-min)&31 == 0
+		}
+		addrRange := max - min
+		if addrRange == 0 {
+			return
+		}
+		var addrShift uintptr
+		if isAligned64 {
+			addrShift = 6
+		} else if isAligned32 {
+			addrShift = 5
 		}
-		isAligned64 = isAligned64 && (addr-min)&63 == 0
-		isAligned32 = isAligned32 && (addr-min)&31 == 0
-	}
-	addrRange := max - min
-	if addrRange == 0 {
-		return nil
-	}
-	var addrShift uintptr
-	if isAligned64 {
-		addrShift = 6
-	} else if isAligned32 {
-		addrShift = 5
-	}
-	cacheSize := addrRange >> addrShift
-	if cacheSize > maxAcceptableTypeAddrRange {
-		return nil
-	}
-	typeAddr = &TypeAddr{
-		BaseTypeAddr: min,
-		MaxTypeAddr:  max,
-		AddrRange:    addrRange,
-		AddrShift:    addrShift,
-	}
+		cacheSize := addrRange >> addrShift
+		if cacheSize > maxAcceptableTypeAddrRange {
+			return
+		}
+		typeAddr = &TypeAddr{
+			BaseTypeAddr: min,
+			MaxTypeAddr:  max,
+			AddrRange:    addrRange,
+			AddrShift:    addrShift,
+		}
+	})
+	return typeAddr
 }
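All of the goccy/go-json hunks above follow one pattern: package-level caches that used to be built eagerly in init() are now built lazily, exactly once, behind a sync.Once, so the typelinks analysis runs only when the encoder or decoder is first used. A self-contained sketch of that pattern, with a hypothetical stand-in for the cache state:

package main

import (
	"fmt"
	"sync"
)

// cache stands in for state like typeAddr/cachedDecoder in the hunks above.
var (
	cacheOnce sync.Once
	cache     map[string]int
)

// getCache replaces an init() function: the expensive setup runs only on
// first use, and concurrent first callers are serialized by the Once.
func getCache() map[string]int {
	cacheOnce.Do(func() {
		cache = map[string]int{"analyzed": 1}
	})
	return cache
}

func main() {
	fmt.Println(getCache()["analyzed"])
}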
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.gitignore b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore
new file mode 100644
index 000000000..836562412
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml
new file mode 100644
index 000000000..7e7b8a962
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml
@@ -0,0 +1,46 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+linters:
+  fast: false
+  disable-all: true
+  enable:
+    - revive
+    - megacheck
+    - govet
+    - unconvert
+    - gas
+    - gocyclo
+    - dupl
+    - misspell
+    - unparam
+    - unused
+    - typecheck
+    - ineffassign
+    # - stylecheck
+    - exportloopref
+    - gocritic
+    - nakedret
+    - gosimple
+    - prealloc
+
+# golangci-lint configuration file
+linters-settings:
+  revive:
+    ignore-generated-header: true
+    severity: warning
+    rules:
+      - name: package-comments
+        severity: warning
+        disabled: true
+      - name: exported
+        severity: warning
+        disabled: false
+        arguments: ["checkPrivateReceivers", "disableStutteringCheck"]
+
+issues:
+  exclude-use-default: false
+  exclude-rules:
+    - path: _test\.go
+      linters:
+        - dupl
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/2q.go b/vendor/github.com/hashicorp/golang-lru/v2/2q.go
new file mode 100644
index 000000000..8c95252b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/2q.go
@@ -0,0 +1,267 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package lru
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/hashicorp/golang-lru/v2/simplelru"
+)
+
+const (
+	// Default2QRecentRatio is the ratio of the 2Q cache dedicated
+	// to recently added entries that have only been accessed once.
+	Default2QRecentRatio = 0.25
+
+	// Default2QGhostEntries is the default ratio of ghost
+	// entries kept to track entries recently evicted
+	Default2QGhostEntries = 0.50
+)
+
+// TwoQueueCache is a thread-safe fixed size 2Q cache.
+// 2Q is an enhancement over the standard LRU cache
+// in that it tracks both frequently and recently used
+// entries separately. This avoids a burst in access to new
+// entries from evicting frequently used entries. It adds some
+// additional tracking overhead to the standard LRU cache, and is
+// computationally about 2x the cost, and adds some metadata over
+// head. The ARCCache is similar, but does not require setting any
+// parameters.
+type TwoQueueCache[K comparable, V any] struct {
+	size        int
+	recentSize  int
+	recentRatio float64
+	ghostRatio  float64
+
+	recent      simplelru.LRUCache[K, V]
+	frequent    simplelru.LRUCache[K, V]
+	recentEvict simplelru.LRUCache[K, struct{}]
+	lock        sync.RWMutex
+}
+
+// New2Q creates a new TwoQueueCache using the default
+// values for the parameters.
+func New2Q[K comparable, V any](size int) (*TwoQueueCache[K, V], error) {
+	return New2QParams[K, V](size, Default2QRecentRatio, Default2QGhostEntries)
+}
+
+// New2QParams creates a new TwoQueueCache using the provided
+// parameter values.
+func New2QParams[K comparable, V any](size int, recentRatio, ghostRatio float64) (*TwoQueueCache[K, V], error) {
+	if size <= 0 {
+		return nil, errors.New("invalid size")
+	}
+	if recentRatio < 0.0 || recentRatio > 1.0 {
+		return nil, errors.New("invalid recent ratio")
+	}
+	if ghostRatio < 0.0 || ghostRatio > 1.0 {
+		return nil, errors.New("invalid ghost ratio")
+	}
+
+	// Determine the sub-sizes
+	recentSize := int(float64(size) * recentRatio)
+	evictSize := int(float64(size) * ghostRatio)
+
+	// Allocate the LRUs
+	recent, err := simplelru.NewLRU[K, V](size, nil)
+	if err != nil {
+		return nil, err
+	}
+	frequent, err := simplelru.NewLRU[K, V](size, nil)
+	if err != nil {
+		return nil, err
+	}
+	recentEvict, err := simplelru.NewLRU[K, struct{}](evictSize, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize the cache
+	c := &TwoQueueCache[K, V]{
+		size:        size,
+		recentSize:  recentSize,
+		recentRatio: recentRatio,
+		ghostRatio:  ghostRatio,
+		recent:      recent,
+		frequent:    frequent,
+		recentEvict: recentEvict,
+	}
+	return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *TwoQueueCache[K, V]) Get(key K) (value V, ok bool) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Check if this is a frequent value
+	if val, ok := c.frequent.Get(key); ok {
+		return val, ok
+	}
+
+	// If the value is contained in recent, then we
+	// promote it to frequent
+	if val, ok := c.recent.Peek(key); ok {
+		c.recent.Remove(key)
+		c.frequent.Add(key, val)
+		return val, ok
+	}
+
+	// No hit
+	return
+}
+
+// Add adds a value to the cache.
+func (c *TwoQueueCache[K, V]) Add(key K, value V) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Check if the value is frequently used already,
+	// and just update the value
+	if c.frequent.Contains(key) {
+		c.frequent.Add(key, value)
+		return
+	}
+
+	// Check if the value is recently used, and promote
+	// the value into the frequent list
+	if c.recent.Contains(key) {
+		c.recent.Remove(key)
+		c.frequent.Add(key, value)
+		return
+	}
+
+	// If the value was recently evicted, add it to the
+	// frequently used list
+	if c.recentEvict.Contains(key) {
+		c.ensureSpace(true)
+		c.recentEvict.Remove(key)
+		c.frequent.Add(key, value)
+		return
+	}
+
+	// Add to the recently seen list
+	c.ensureSpace(false)
+	c.recent.Add(key, value)
+}
+
+// ensureSpace is used to ensure we have space in the cache
+func (c *TwoQueueCache[K, V]) ensureSpace(recentEvict bool) {
+	// If we have space, nothing to do
+	recentLen := c.recent.Len()
+	freqLen := c.frequent.Len()
+	if recentLen+freqLen < c.size {
+		return
+	}
+
+	// If the recent buffer is larger than
+	// the target, evict from there
+	if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
+		k, _, _ := c.recent.RemoveOldest()
+		c.recentEvict.Add(k, struct{}{})
+		return
+	}
+
+	// Remove from the frequent list otherwise
+	c.frequent.RemoveOldest()
+}
+
+// Len returns the number of items in the cache.
+func (c *TwoQueueCache[K, V]) Len() int {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	return c.recent.Len() + c.frequent.Len()
+}
+
+// Resize changes the cache size.
+func (c *TwoQueueCache[K, V]) Resize(size int) (evicted int) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Recalculate the sub-sizes
+	recentSize := int(float64(size) * c.recentRatio)
+	evictSize := int(float64(size) * c.ghostRatio)
+	c.size = size
+	c.recentSize = recentSize
+
+	// ensureSpace
+	diff := c.recent.Len() + c.frequent.Len() - size
+	if diff < 0 {
+		diff = 0
+	}
+	for i := 0; i < diff; i++ {
+		c.ensureSpace(true)
+	}
+
+	// Reallocate the LRUs
+	c.recent.Resize(size)
+	c.frequent.Resize(size)
+	c.recentEvict.Resize(evictSize)
+
+	return diff
+}
+
+// Keys returns a slice of the keys in the cache.
+// The frequently used keys are first in the returned slice.
+func (c *TwoQueueCache[K, V]) Keys() []K {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	k1 := c.frequent.Keys()
+	k2 := c.recent.Keys()
+	return append(k1, k2...)
+}
+
+// Values returns a slice of the values in the cache.
+// The frequently used values are first in the returned slice.
+func (c *TwoQueueCache[K, V]) Values() []V {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	v1 := c.frequent.Values()
+	v2 := c.recent.Values()
+	return append(v1, v2...)
+}
+
+// Remove removes the provided key from the cache.
+func (c *TwoQueueCache[K, V]) Remove(key K) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.frequent.Remove(key) {
+		return
+	}
+	if c.recent.Remove(key) {
+		return
+	}
+	if c.recentEvict.Remove(key) {
+		return
+	}
+}
+
+// Purge is used to completely clear the cache.
+func (c *TwoQueueCache[K, V]) Purge() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	c.recent.Purge()
+	c.frequent.Purge()
+	c.recentEvict.Purge()
+}
+
+// Contains is used to check if the cache contains a key
+// without updating recency or frequency.
+func (c *TwoQueueCache[K, V]) Contains(key K) bool {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	return c.frequent.Contains(key) || c.recent.Contains(key)
+}
+
+// Peek is used to inspect the cache value of a key
+// without updating recency or frequency.
+func (c *TwoQueueCache[K, V]) Peek(key K) (value V, ok bool) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	if val, ok := c.frequent.Peek(key); ok {
+		return val, ok
+	}
+	return c.recent.Peek(key)
+}
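The newly vendored 2q.go above implements a generic 2Q cache: Get promotes entries from the recent list to the frequent list, and the ghost list of recent evictions lets a re-added key go straight to frequent. A brief usage sketch against the API defined in that file:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// A 128-entry 2Q cache with the default 25% recent / 50% ghost ratios.
	cache, err := lru.New2Q[string, int](128)
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1) // lands in the recent list
	if v, ok := cache.Get("a"); ok {
		fmt.Println(v) // this Get promotes "a" to the frequent list
	}
	fmt.Println(cache.Contains("a"), cache.Len())
}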
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/LICENSE b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE
new file mode 100644
index 000000000..0e5d580e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE
@@ -0,0 +1,364 @@
+Copyright (c) 2014 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+     means any patent claim(s), including without limitation, method,
+     process, and apparatus claims, in any patent Licensable by such
+     Contributor that would be infringed, but for the grant of the License,
+     by the making, using, selling, offering for sale, having made, import,
+     or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+     means either the GNU General Public License, Version 2.0, the GNU Lesser
+     General Public License, Version 2.1, the GNU Affero General Public
+     License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+     means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+     means an individual or a legal entity exercising rights under this
+     License. For legal entities, "You" includes any entity that controls, is
+     controlled by, or is under common control with You. For purposes of this
+     definition, "control" means (a) the power, direct or indirect, to cause
+     the direction or management of such entity, whether by contract or
+     otherwise, or (b) ownership of more than fifty percent (50%) of the
+     outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or
+        as part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its
+        Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution
+     become effective for each Contribution on the date the Contributor first
+     distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under
+     this License. No additional rights or licenses will be implied from the
+     distribution or licensing of Covered Software under this License.
+     Notwithstanding Section 2.1(b) above, no patent license is granted by a
+     Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party's
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of
+        its Contributions.
+
+     This License does not grant any rights in the trademarks, service marks,
+     or logos of any Contributor (except as may be necessary to comply with
+     the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this
+     License (see Section 10.2) or under the terms of a Secondary License (if
+     permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its
+     Contributions are its original creation(s) or it has sufficient rights to
+     grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under
+     applicable copyright doctrines of fair use, fair dealing, or other
+     equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under
+     the terms of this License. You must inform recipients that the Source
+     Code Form of the Covered Software is governed by the terms of this
+     License, and how they can obtain a copy of this License. You may not
+     attempt to alter or restrict the recipients' rights in the Source Code
+     Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this
+        License, or sublicense it under different terms, provided that the
+        license for the Executable Form does not attempt to limit or alter the
+        recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for
+     the Covered Software. If the Larger Work is a combination of Covered
+     Software with a work governed by one or more Secondary Licenses, and the
+     Covered Software is not Incompatible With Secondary Licenses, this
+     License permits You to additionally distribute such Covered Software
+     under the terms of such Secondary License(s), so that the recipient of
+     the Larger Work may, at their option, further distribute the Covered
+     Software under the terms of either this License or such Secondary
+     License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices
+     (including copyright notices, patent notices, disclaimers of warranty, or
+     limitations of liability) contained within the Source Code Form of the
+     Covered Software, except that You may alter any license notices to the
+     extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on
+     behalf of any Contributor. You must make it absolutely clear that any
+     such warranty, support, indemnity, or liability obligation is offered by
+     You alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute,
+   judicial order, or regulation then You must: (a) comply with the terms of
+   this License to the maximum extent possible; and (b) describe the
+   limitations and the code they affect. Such description must be placed in a
+   text file included with all distributions of the Covered Software under
+   this License. Except to the extent prohibited by statute or regulation,
+   such description must be sufficiently detailed for a recipient of ordinary
+   skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing
+     basis, if such Contributor fails to notify You of the non-compliance by
+     some reasonable means prior to 60 days after You have come back into
+     compliance. Moreover, Your grants from a particular Contributor are
+     reinstated on an ongoing basis if such Contributor notifies You of the
+     non-compliance by some reasonable means, this is the first time You have
+     received notice of non-compliance with this License from such
+     Contributor, and You become compliant prior to 30 days after Your receipt
+     of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions,
+     counter-claims, and cross-claims) alleging that a Contributor Version
+     directly or indirectly infringes any patent, then the rights granted to
+     You by any and all Contributors for the Covered Software under Section
+     2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis,
+   without warranty of any kind, either expressed, implied, or statutory,
+   including, without limitation, warranties that the Covered Software is free
+   of defects, merchantable, fit for a particular purpose or non-infringing.
+   The entire risk as to the quality and performance of the Covered Software
+   is with You. Should any Covered Software prove defective in any respect,
+   You (not any Contributor) assume the cost of any necessary servicing,
+   repair, or correction.
This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/v2/README.md b/vendor/github.com/hashicorp/golang-lru/v2/README.md new file mode 100644 index 000000000..a942eb539 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/README.md @@ -0,0 +1,79 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Go Packages](https://pkg.go.dev/github.com/hashicorp/golang-lru/v2) + +LRU cache example +================= + +```go +package main + +import ( + "fmt" + "github.com/hashicorp/golang-lru/v2" +) + +func main() { + l, _ := lru.New[int, any](128) + for i := 0; i < 256; i++ { + l.Add(i, nil) + } + if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) + } +} +``` + +Expirable LRU cache example +=========================== + +```go +package main + +import ( + "fmt" + "time" + + "github.com/hashicorp/golang-lru/v2/expirable" +) + +func main() { + // make cache with 10ms TTL and 5 max keys + cache := expirable.NewLRU[string, string](5, nil, time.Millisecond*10) + + + // set value under key1. + cache.Add("key1", "val1") + + // get value under key1 + r, ok := cache.Get("key1") + + // check for OK value + if ok { + fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r) + } + + // wait for cache to expire + time.Sleep(time.Millisecond * 12) + + // get value under key1 after key expiration + r, ok = cache.Get("key1") + fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r) + + // set value under key2, would evict old entry because it is already expired. + cache.Add("key2", "val2") + + fmt.Printf("Cache len: %d\n", cache.Len()) + // Output: + // value before expiration is found: true, value: "val1" + // value after expiration is found: false, value: "" + // Cache len: 1 +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/v2/doc.go b/vendor/github.com/hashicorp/golang-lru/v2/doc.go new file mode 100644 index 000000000..24107ee0e --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the LRU implementation in +// groupcache: https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, at +// the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as well +// as recent usage in both the frequent and recent caches. 
Its computational +// overhead is comparable to TwoQueueCache, but the memory overhead is linear +// with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. For this reason, it is in a separate go module contained within +// this repository. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go new file mode 100644 index 000000000..5cd74a034 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE_list file. + +package internal + +import "time" + +// Entry is an LRU Entry +type Entry[K comparable, V any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Entry[K, V] + + // The list to which this element belongs. + list *LruList[K, V] + + // The LRU Key of this element. + Key K + + // The Value stored with this element. + Value V + + // The time this element would be cleaned up, optional + ExpiresAt time.Time + + // The expiry bucket item was put in, optional + ExpireBucket uint8 +} + +// PrevEntry returns the previous list element or nil. +func (e *Entry[K, V]) PrevEntry() *Entry[K, V] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// LruList represents a doubly linked list. +// The zero Value for LruList is an empty list ready to use. +type LruList[K comparable, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list Length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *LruList[K, V]) Init() *LruList[K, V] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// NewList returns an initialized list. +func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() } + +// Length returns the number of elements of list l. +// The complexity is O(1). +func (l *LruList[K, V]) Length() int { return l.len } + +// Back returns the last element of list l or nil if the list is empty. +func (l *LruList[K, V]) Back() *Entry[K, V] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List Value. +func (l *LruList[K, V]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at). 
+func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] {
+	return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at)
+}
+
+// Remove removes e from its list, decrements l.len
+func (l *LruList[K, V]) Remove(e *Entry[K, V]) V {
+	e.prev.next = e.next
+	e.next.prev = e.prev
+	e.next = nil // avoid memory leaks
+	e.prev = nil // avoid memory leaks
+	e.list = nil
+	l.len--
+
+	return e.Value
+}
+
+// move moves e to next to at.
+func (l *LruList[K, V]) move(e, at *Entry[K, V]) {
+	if e == at {
+		return
+	}
+	e.prev.next = e.next
+	e.next.prev = e.prev
+
+	e.prev = at
+	e.next = at.next
+	e.prev.next = e
+	e.next.prev = e
+}
+
+// PushFront inserts a new element e with value v at the front of list l and returns e.
+func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] {
+	l.lazyInit()
+	return l.insertValue(k, v, time.Time{}, &l.root)
+}
+
+// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e.
+func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] {
+	l.lazyInit()
+	return l.insertValue(k, v, expiresAt, &l.root)
+}
+
+// MoveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) {
+	if e.list != l || l.root.next == e {
+		return
+	}
+	// see comment in List.Remove about initialization of l
+	l.move(e, &l.root)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/lru.go
new file mode 100644
index 000000000..a2655f1f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/lru.go
@@ -0,0 +1,250 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package lru
+
+import (
+	"sync"
+
+	"github.com/hashicorp/golang-lru/v2/simplelru"
+)
+
+const (
+	// DefaultEvictedBufferSize defines the default buffer size to store evicted key/val
+	DefaultEvictedBufferSize = 16
+)
+
+// Cache is a thread-safe fixed size LRU cache.
+type Cache[K comparable, V any] struct {
+	lru         *simplelru.LRU[K, V]
+	evictedKeys []K
+	evictedVals []V
+	onEvictedCB func(k K, v V)
+	lock        sync.RWMutex
+}
+
+// New creates an LRU of the given size.
+func New[K comparable, V any](size int) (*Cache[K, V], error) {
+	return NewWithEvict[K, V](size, nil)
+}
+
+// NewWithEvict constructs a fixed size cache with the given eviction
+// callback.
+func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) {
+	// create a cache with default settings
+	c = &Cache[K, V]{
+		onEvictedCB: onEvicted,
+	}
+	if onEvicted != nil {
+		c.initEvictBuffers()
+		onEvicted = c.onEvicted
+	}
+	c.lru, err = simplelru.NewLRU(size, onEvicted)
+	return
+}
+
+func (c *Cache[K, V]) initEvictBuffers() {
+	c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize)
+	c.evictedVals = make([]V, 0, DefaultEvictedBufferSize)
+}
+
+// onEvicted saves the evicted key/val pair so that it can be sent to the
+// externally registered callback outside of the critical section.
+func (c *Cache[K, V]) onEvicted(k K, v V) {
+	c.evictedKeys = append(c.evictedKeys, k)
+	c.evictedVals = append(c.evictedVals, v)
+}
+
+// Purge is used to completely clear the cache.
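+// If an eviction callback was registered via NewWithEvict, it is invoked for
+// every purged entry after the lock has been released.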
+func (c *Cache[K, V]) Purge() { + var ks []K + var vs []V + c.lock.Lock() + c.lru.Purge() + if c.onEvictedCB != nil && len(c.evictedKeys) > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + // invoke callback outside of critical section + if c.onEvictedCB != nil { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache[K, V]) Add(key K, value V) (evicted bool) { + var k K + var v V + c.lock.Lock() + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return +} + +// Get looks up a key's value from the cache. +func (c *Cache[K, V]) Get(key K) (value V, ok bool) { + c.lock.Lock() + value, ok = c.lru.Get(key) + c.lock.Unlock() + return value, ok +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache[K, V]) Contains(key K) bool { + c.lock.RLock() + containKey := c.lru.Contains(key) + c.lock.RUnlock() + return containKey +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache[K, V]) Peek(key K) (value V, ok bool) { + c.lock.RLock() + value, ok = c.lru.Peek(key) + c.lock.RUnlock() + return value, ok +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) { + var k K + var v V + c.lock.Lock() + if c.lru.Contains(key) { + c.lock.Unlock() + return true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return false, evicted +} + +// PeekOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) { + var k K + var v V + c.lock.Lock() + previous, ok = c.lru.Peek(key) + if ok { + c.lock.Unlock() + return previous, true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache[K, V]) Remove(key K) (present bool) { + var k K + var v V + c.lock.Lock() + present = c.lru.Remove(key) + if c.onEvictedCB != nil && present { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && present { + c.onEvictedCB(k, v) + } + return +} + +// Resize changes the cache size. 
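+// If the new size is smaller than the current number of entries, the oldest
+// entries are evicted until the cache fits, and the eviction callback (if
+// any) is invoked for each of them outside of the critical section.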
+func (c *Cache[K, V]) Resize(size int) (evicted int) { + var ks []K + var vs []V + c.lock.Lock() + evicted = c.lru.Resize(size) + if c.onEvictedCB != nil && evicted > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted > 0 { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } + return evicted +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) { + var k K + var v V + c.lock.Lock() + key, value, ok = c.lru.RemoveOldest() + if c.onEvictedCB != nil && ok { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && ok { + c.onEvictedCB(k, v) + } + return +} + +// GetOldest returns the oldest entry +func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) { + c.lock.RLock() + key, value, ok = c.lru.GetOldest() + c.lock.RUnlock() + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache[K, V]) Keys() []K { + c.lock.RLock() + keys := c.lru.Keys() + c.lock.RUnlock() + return keys +} + +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *Cache[K, V]) Values() []V { + c.lock.RLock() + values := c.lru.Values() + c.lock.RUnlock() + return values +} + +// Len returns the number of items in the cache. +func (c *Cache[K, V]) Len() int { + c.lock.RLock() + length := c.lru.Len() + c.lock.RUnlock() + return length +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list new file mode 100644 index 000000000..c4764e6b2 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list @@ -0,0 +1,29 @@ +This license applies to simplelru/list.go + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go new file mode 100644 index 000000000..f69792388 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go @@ -0,0 +1,177 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package simplelru + +import ( + "errors" + + "github.com/hashicorp/golang-lru/v2/internal" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback[K comparable, V any] func(key K, value V) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU[K comparable, V any] struct { + size int + evictList *internal.LruList[K, V] + items map[K]*internal.Entry[K, V] + onEvict EvictCallback[K, V] +} + +// NewLRU constructs an LRU of the given size +func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) { + if size <= 0 { + return nil, errors.New("must provide a positive size") + } + + c := &LRU[K, V]{ + size: size, + evictList: internal.NewList[K, V](), + items: make(map[K]*internal.Entry[K, V]), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU[K, V]) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value = value + return false + } + + // Add new item + ent := c.evictList.PushFront(key, value) + c.items[key] = ent + + evict := c.evictList.Length() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU[K, V]) Get(key K) (value V, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU[K, V]) Contains(key K) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { + var ent *internal.Entry[K, V] + if ent, ok = c.items[key]; ok { + return ent.Value, true + } + return +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU[K, V]) Remove(key K) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + c.removeElement(ent) + return ent.Key, ent.Value, true + } + return +} + +// GetOldest returns the oldest entry +func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + return ent.Key, ent.Value, true + } + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. 
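+// The result is a freshly allocated slice built by walking the eviction list
+// from back (oldest) to front (newest); mutating it does not affect the cache.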
+func (c *LRU[K, V]) Keys() []K {
+	keys := make([]K, c.evictList.Length())
+	i := 0
+	for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
+		keys[i] = ent.Key
+		i++
+	}
+	return keys
+}
+
+// Values returns a slice of the values in the cache, from oldest to newest.
+func (c *LRU[K, V]) Values() []V {
+	values := make([]V, len(c.items))
+	i := 0
+	for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
+		values[i] = ent.Value
+		i++
+	}
+	return values
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU[K, V]) Len() int {
+	return c.evictList.Length()
+}
+
+// Resize changes the cache size.
+func (c *LRU[K, V]) Resize(size int) (evicted int) {
+	diff := c.Len() - size
+	if diff < 0 {
+		diff = 0
+	}
+	for i := 0; i < diff; i++ {
+		c.removeOldest()
+	}
+	c.size = size
+	return diff
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU[K, V]) removeOldest() {
+	if ent := c.evictList.Back(); ent != nil {
+		c.removeElement(ent)
+	}
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) {
+	c.evictList.Remove(e)
+	delete(c.items, e.Key)
+	if c.onEvict != nil {
+		c.onEvict(e.Key, e.Value)
+	}
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
new file mode 100644
index 000000000..043b8bcc3
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
@@ -0,0 +1,46 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package simplelru provides a simple LRU implementation based on the built-in container/list.
+package simplelru
+
+// LRUCache is the interface for a simple LRU cache.
+type LRUCache[K comparable, V any] interface {
+	// Adds a value to the cache, returns true if an eviction occurred and
+	// updates the "recently used"-ness of the key.
+	Add(key K, value V) bool
+
+	// Returns key's value from the cache and
+	// updates the "recently used"-ness of the key. #value, isFound
+	Get(key K) (value V, ok bool)
+
+	// Checks if a key exists in cache without updating the recent-ness.
+	Contains(key K) (ok bool)
+
+	// Returns key's value without updating the "recently used"-ness of the key.
+	Peek(key K) (value V, ok bool)
+
+	// Removes a key from the cache.
+	Remove(key K) bool
+
+	// Removes the oldest entry from cache.
+	RemoveOldest() (K, V, bool)
+
+	// Returns the oldest entry from the cache. #key, value, isFound
+	GetOldest() (K, V, bool)
+
+	// Returns a slice of the keys in the cache, from oldest to newest.
+	Keys() []K
+
+	// Values returns a slice of the values in the cache, from oldest to newest.
+	Values() []V
+
+	// Returns the number of items in the cache.
+	Len() int
+
+	// Clears all cache entries.
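+	// (The LRU implementation in this package invokes its EvictCallback
+	// for each purged entry.)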
+ Purge() + + // Resizes cache, returning number evicted + Resize(int) int +} diff --git a/vendor/github.com/huandu/go-clone/.gitignore b/vendor/github.com/huandu/go-clone/.gitignore new file mode 100644 index 000000000..cb6631ccf --- /dev/null +++ b/vendor/github.com/huandu/go-clone/.gitignore @@ -0,0 +1,40 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Intellij +*.iml +.idea/ + +# VS Code +debug +debug_test +.vscode/ + +# Mac +.DS_Store + +# go workspace +go.work +go.work.sum diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/github.com/huandu/go-clone/LICENSE similarity index 95% rename from vendor/go.uber.org/multierr/LICENSE.txt rename to vendor/github.com/huandu/go-clone/LICENSE index 413e30f7c..fa5655e92 100644 --- a/vendor/go.uber.org/multierr/LICENSE.txt +++ b/vendor/github.com/huandu/go-clone/LICENSE @@ -1,19 +1,19 @@ -Copyright (c) 2017-2021 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +Copyright (c) 2019 Huan Du + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/huandu/go-clone/README.md b/vendor/github.com/huandu/go-clone/README.md
new file mode 100644
index 000000000..9e3a610ca
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/README.md
@@ -0,0 +1,296 @@
+# go-clone: Clone any Go data structure deeply and thoroughly
+
+[![Go](https://github.com/huandu/go-clone/workflows/Go/badge.svg)](https://github.com/huandu/go-clone/actions)
+[![Go Doc](https://godoc.org/github.com/huandu/go-clone?status.svg)](https://pkg.go.dev/github.com/huandu/go-clone)
+[![Go Report](https://goreportcard.com/badge/github.com/huandu/go-clone)](https://goreportcard.com/report/github.com/huandu/go-clone)
+[![Coverage Status](https://coveralls.io/repos/github/huandu/go-clone/badge.svg?branch=master)](https://coveralls.io/github/huandu/go-clone?branch=master)
+
+Package `clone` provides functions to deep clone any Go data. It also provides a wrapper to protect a pointer from any unexpected mutation.
+
+For users who use Go 1.18+, it's recommended to import `github.com/huandu/go-clone/generic` for generic APIs and arena support.
+
+`Clone`/`Slowly` can clone unexported fields and "no-copy" structs as well. Use this feature wisely.
+
+## Install
+
+Use `go get` to install this package.
+
+```shell
+go get github.com/huandu/go-clone
+```
+
+## Usage
+
+### `Clone` and `Slowly`
+
+If we want to clone any Go value, use `Clone`.
+
+```go
+t := &T{...}
+v := clone.Clone(t).(*T)
+reflect.DeepEqual(t, v) // true
+```
+
+For the sake of performance, `Clone` doesn't deal with values containing pointer cycles.
+If we need to clone such values, use `Slowly` instead.
+
+```go
+type ListNode struct {
+    Data int
+    Next *ListNode
+}
+node1 := &ListNode{
+    Data: 1,
+}
+node2 := &ListNode{
+    Data: 2,
+}
+node3 := &ListNode{
+    Data: 3,
+}
+node1.Next = node2
+node2.Next = node3
+node3.Next = node1
+
+// We must use `Slowly` to clone a circular linked list.
+node := clone.Slowly(node1).(*ListNode)
+
+for i := 0; i < 10; i++ {
+    fmt.Println(node.Data)
+    node = node.Next
+}
+```
+
+### Generic APIs
+
+Starting from Go 1.18, the language supports generics. With generics, `Clone`/`Slowly` and other APIs can be called much more cleanly, as follows.
+
+```go
+import "github.com/huandu/go-clone/generic"
+
+type MyType struct {
+    Foo string
+}
+
+original := &MyType{
+    Foo: "bar",
+}
+
+// The type of cloned is *MyType instead of interface{}.
+cloned := clone.Clone(original)
+println(cloned.Foo) // Output: bar
+```
+
+Opting in to the generic syntax requires raising the minimum Go version to 1.18. It would not be a wise choice to update this package's `go.mod` and drop support for so many older Go compilers for the sake of syntactic sugar. Therefore, I decided to create a new standalone package, `github.com/huandu/go-clone/generic`, to provide APIs with generic syntax.
+
+For new users who use Go 1.18+, the generic package is preferred and recommended.
+
+### Arena support
+
+Starting from Go 1.20, arenas were introduced as a new way to allocate memory. They can be quite useful for improving overall performance in special scenarios.
+To clone a value with memory allocated from an arena, the new methods `ArenaClone` and `ArenaCloneSlowly` are available in `github.com/huandu/go-clone/generic`.
+
+```go
+// ArenaClone recursively deep clones v to a new value in arena a.
+// It works in the same way as Clone, except it allocates all memory from arena.
+func ArenaClone[T any](a *arena.Arena, v T) (nv T)
+
+// ArenaCloneSlowly recursively deep clones v to a new value in arena a.
+// It works in the same way as Slowly, except it allocates all memory from arena.
+func ArenaCloneSlowly[T any](a *arena.Arena, v T) (nv T)
+```
+
+Due to limitations in the arena API, the memory of the internal data structures of `map` and `chan` is always allocated on the heap by the Go runtime ([see this issue](https://github.com/golang/go/issues/56230)).
+
+**Warning**: Per [discussion in the arena proposal](https://github.com/golang/go/issues/51317), the arena package may be changed incompatibly or removed in the future. All arena-related APIs in this package will be changed accordingly.
+
+### Struct tags
+
+There are some struct tags to control how to clone a struct field.
+
+```go
+type T struct {
+    Normal *int
+    Foo    *int `clone:"skip"`       // Skip cloning this field so that Foo will be zero in cloned value.
+    Bar    *int `clone:"-"`          // "-" is an alias of skip.
+    Baz    *int `clone:"shadowcopy"` // Copy this field by shadow copy.
+}
+
+a := 1
+t := &T{
+    Normal: &a,
+    Foo:    &a,
+    Bar:    &a,
+    Baz:    &a,
+}
+v := clone.Clone(t).(*T)
+
+fmt.Println(v.Normal == t.Normal) // false
+fmt.Println(v.Foo == nil)         // true
+fmt.Println(v.Bar == nil)         // true
+fmt.Println(v.Baz == t.Baz)       // true
+```
+
+### Memory allocations and the `Allocator`
+
+The `Allocator` is designed to allocate memory when cloning. It's also used to hold all customizations, e.g. custom clone functions, scalar types, opaque pointers, etc. There is a default allocator which allocates memory from the heap. Almost all public APIs in this package use this default allocator to do their job.
+
+We can control how memory is allocated by creating a new `Allocator` with `NewAllocator`. It enables us to take full control over memory allocation when cloning. See [Allocator sample code](https://pkg.go.dev/github.com/huandu/go-clone#example-Allocator) to understand how to customize an allocator.
+
+Let's take a closer look at the `NewAllocator` function.
+
+```go
+func NewAllocator(pool unsafe.Pointer, methods *AllocatorMethods) *Allocator
+```
+
+- The first parameter `pool` is a pointer to a memory pool. It's used to allocate memory for cloning. It can be `nil` if we don't need a memory pool.
+- The second parameter `methods` is a pointer to a struct which contains all methods to allocate memory. It can be `nil` if we don't need to customize memory allocation.
+- The `Allocator` struct itself is allocated by `methods.New`, by the `methods.Parent` allocator, or from the heap.
+
+The `Parent` in `AllocatorMethods` indicates the parent of the new allocator. With this feature, we can organize allocators into a tree structure. All customizations, including custom clone functions, scalar types, opaque pointers, etc., are inherited from parent allocators.
+
+There are some APIs designed for convenience.
+
+- We can create dedicated allocators for heap or arena by calling `FromHeap()` or `FromArena(a *arena.Arena)`.
+- We can call `MakeCloner(allocator)` to create a helper struct with `Clone` and `CloneSlowly` methods in which the type of in and out parameters is `interface{}`.
+
+### Mark struct type as scalar
+
+Some struct types can be considered scalar.
+
+A well-known case is `time.Time`.
+Although there is a pointer `loc *time.Location` inside `time.Time`, we always use `time.Time` by value in all methods.
+When cloning `time.Time`, it should be OK to return a shadow copy.
+
+Currently, the following types are marked as scalar by default.
+
+- `time.Time`
+- `reflect.Value`
+
+If any type defined in a built-in package should be considered scalar, please open a new issue to let me know.
+I will update the default.
+
+If any custom type should be considered scalar, call `MarkAsScalar` to mark it manually. See [MarkAsScalar sample code](https://pkg.go.dev/github.com/huandu/go-clone#example-MarkAsScalar) for more details.
+
+### Mark pointer type as opaque
+
+Some pointer values are used as enumerable const values.
+
+A well-known case is `elliptic.Curve`. In package `crypto/tls`, the curve type of a certificate is checked by comparing values to pre-defined curve values, e.g. `elliptic.P521()`. In this case, the curve values, which are pointers or structs, must not be cloned deeply.
+
+Currently, the following types are marked as opaque pointers by default.
+
+- `elliptic.Curve`, which is `*elliptic.CurveParam` or `elliptic.p256Curve`.
+- `reflect.Type`, which is `*reflect.rtype` defined in `runtime`.
+
+If any pointer type defined in a built-in package should be considered opaque, please open a new issue to let me know.
+I will update the default.
+
+If any custom pointer type should be considered opaque, call `MarkAsOpaquePointer` to mark it manually. See [MarkAsOpaquePointer sample code](https://pkg.go.dev/github.com/huandu/go-clone#example-MarkAsOpaquePointer) for more details.
+
+### Clone "no-copy" types defined in `sync` and `sync/atomic`
+
+There are some "no-copy" types like `sync.Mutex`, `atomic.Value`, etc.
+They cannot be cloned by copying all fields one by one, but we can allocate a new zero value and call methods to do proper initialization.
+
+Currently, all "no-copy" types defined in `sync` and `sync/atomic` can be cloned properly using the following strategies.
+
+- `sync.Mutex`: Cloned value is a newly allocated zero mutex.
+- `sync.RWMutex`: Cloned value is a newly allocated zero mutex.
+- `sync.WaitGroup`: Cloned value is a newly allocated zero wait group.
+- `sync.Cond`: Cloned value is a cond with a newly allocated zero lock.
+- `sync.Pool`: Cloned value is an empty pool with the same `New` function.
+- `sync.Map`: Cloned value is a sync map with cloned key/value pairs.
+- `sync.Once`: Cloned value is a once type with the same done flag.
+- `atomic.Value`/`atomic.Bool`/`atomic.Int32`/`atomic.Int64`/`atomic.Uint32`/`atomic.Uint64`/`atomic.Uintptr`: Cloned value is a new atomic value with the same value.
+
+If any type defined in a built-in package should be considered a "no-copy" type, please open a new issue to let me know.
+I will update the default.
+
+### Set custom clone functions
+
+If the default clone strategy doesn't work for a struct type, we can call `SetCustomFunc` to register a custom clone function.
+
+```go
+SetCustomFunc(reflect.TypeOf(MyType{}), func(allocator *Allocator, old, new reflect.Value) {
+    // Customized logic to copy the old to the new.
+    // The old's type is MyType.
+    // The new is a zero value of MyType and new.CanAddr() always returns true.
+})
+```
+
+We can use `allocator` to clone any value or allocate new memory.
+It's allowed to call `allocator.Clone` or `allocator.CloneSlowly` on `old` to clone its struct fields in depth without worrying about infinite loops.
+
+See [SetCustomFunc sample code](https://pkg.go.dev/github.com/huandu/go-clone#example-SetCustomFunc) for more details.
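+
+As a minimal sketch of how such a callback composes with the allocator (the `Counter` type and its fields below are hypothetical, not part of this package), a custom clone function can deep clone one field through `allocator` while deliberately resetting another:
+
+```go
+// Counter is a hypothetical type whose clones should restart counting at zero.
+type Counter struct {
+    Label string
+    Hits  int
+}
+
+SetCustomFunc(reflect.TypeOf(Counter{}), func(allocator *Allocator, old, new reflect.Value) {
+    // Deep clone Label through the allocator.
+    new.FieldByName("Label").Set(allocator.Clone(old.FieldByName("Label")))
+    // Leave Hits at zero so that every clone restarts counting. new is an
+    // addressable zero value, so exported fields can be set directly.
+    new.FieldByName("Hits").SetInt(0)
+})
+```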
+
+### Clone `atomic.Pointer[T]`
+
+As there is no way to predefine a custom clone function for the generic type `atomic.Pointer[T]`, cloning such atomic types is not supported by default. If we want to support them, we need to register custom clone functions manually.
+
+Suppose we instantiate `atomic.Pointer[T]` with the types `MyType1` and `MyType2` in a project; we can then register custom clone functions as follows.
+
+```go
+import "github.com/huandu/go-clone/generic"
+
+func init() {
+    // Register all instantiated atomic.Pointer[T] types in this project.
+    clone.RegisterAtomicPointer[MyType1]()
+    clone.RegisterAtomicPointer[MyType2]()
+}
+```
+
+### `Wrap`, `Unwrap` and `Undo`
+
+Package `clone` provides `Wrap`/`Unwrap` functions to protect a pointer value from any unexpected mutation.
+They are useful when we want to protect a variable which should be immutable by design,
+e.g. a global config, a value stored in a context, a value sent to a chan, etc.
+
+```go
+// Suppose we have a type T defined as follows.
+//     type T struct {
+//         Foo int
+//     }
+v := &T{
+    Foo: 123,
+}
+w := Wrap(v).(*T) // Wrap value to protect it.
+
+// Use w freely. The type of w is the same as that of v.
+
+// It's OK to modify w. The change will not affect v.
+w.Foo = 456
+fmt.Println(w.Foo) // 456
+fmt.Println(v.Foo) // 123
+
+// Once we need the original value stored in w, call `Unwrap`.
+orig := Unwrap(w).(*T)
+fmt.Println(orig == v) // true
+fmt.Println(orig.Foo) // 123
+
+// Or, we can simply undo any change made in w.
+// Note that `Undo` is significantly slower than `Unwrap`, thus
+// the latter is always preferred.
+Undo(w)
+fmt.Println(w.Foo) // 123
+```
+
+## Performance
+
+Here is the performance data from my dev machine.
+
+```text
+go 1.20.1
+goos: darwin
+goarch: amd64
+cpu: Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz
+BenchmarkSimpleClone-12     7164530    156.7 ns/op     24 B/op    1 allocs/op
+BenchmarkComplexClone-12     628056     1871 ns/op   1488 B/op   21 allocs/op
+BenchmarkUnwrap-12         15498139    78.02 ns/op      0 B/op    0 allocs/op
+BenchmarkSimpleWrap-12      3882360    309.7 ns/op     72 B/op    2 allocs/op
+BenchmarkComplexWrap-12      949654     1245 ns/op    736 B/op   15 allocs/op
+```
+
+## License
+
+This package is licensed under the MIT license. See LICENSE for details.
diff --git a/vendor/github.com/huandu/go-clone/allocator.go b/vendor/github.com/huandu/go-clone/allocator.go
new file mode 100644
index 000000000..af415f339
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/allocator.go
@@ -0,0 +1,335 @@
+// Copyright 2023 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package clone
+
+import (
+	"reflect"
+	"runtime"
+	"sync"
+	"unsafe"
+)
+
+const fieldTagName = "clone"
+const fieldTagValueSkip = "skip"
+const fieldTagValueSkipAlias = "-"
+const fieldTagValueShadowCopy = "shadowcopy"
+
+var typeOfAllocator = reflect.TypeOf(Allocator{})
+
+// defaultAllocator is the default allocator and allocates memory from heap.
+var defaultAllocator = &Allocator{
+	new:       heapNew,
+	makeSlice: heapMakeSlice,
+	makeMap:   heapMakeMap,
+	makeChan:  heapMakeChan,
+	isScalar:  IsScalar,
+}
+
+// Allocator is a utility type for memory allocation.
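+// It bundles the allocation hooks (new, makeSlice, makeMap, makeChan) with
+// per-allocator caches of struct layouts, opaque pointer types and custom
+// clone functions; lookups that miss locally fall back to the parent allocator.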
+type Allocator struct {
+	parent *Allocator
+
+	pool      unsafe.Pointer
+	new       func(pool unsafe.Pointer, t reflect.Type) reflect.Value
+	makeSlice func(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value
+	makeMap   func(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value
+	makeChan  func(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value
+	isScalar  func(t reflect.Kind) bool
+
+	cachedStructTypes     sync.Map
+	cachedPointerTypes    sync.Map
+	cachedCustomFuncTypes sync.Map
+}
+
+// FromHeap creates an allocator which allocates memory from the heap.
+func FromHeap() *Allocator {
+	return NewAllocator(nil, nil)
+}
+
+// NewAllocator creates an allocator which allocates memory from the pool.
+// Both pool and methods are optional.
+//
+// If methods.New is not nil, the allocator itself is created by calling methods.New.
+//
+// The pool is a pointer to the memory pool which is opaque to the allocator.
+// It's the methods' responsibility to allocate memory from the pool properly.
+func NewAllocator(pool unsafe.Pointer, methods *AllocatorMethods) (allocator *Allocator) {
+	parent := methods.parent()
+	new := methods.new(parent, pool)
+
+	// Allocate the allocator from the pool.
+	val := new(pool, typeOfAllocator)
+	allocator = (*Allocator)(unsafe.Pointer(val.Pointer()))
+	runtime.KeepAlive(val)
+
+	allocator.pool = pool
+	allocator.new = new
+	allocator.makeSlice = methods.makeSlice(parent, pool)
+	allocator.makeMap = methods.makeMap(parent, pool)
+	allocator.makeChan = methods.makeChan(parent, pool)
+	allocator.isScalar = methods.isScalar(parent)
+
+	if parent == nil {
+		parent = defaultAllocator
+	}
+
+	allocator.parent = parent
+	return
+}
+
+// New returns a new zero value of t.
+func (a *Allocator) New(t reflect.Type) reflect.Value {
+	return a.new(a.pool, t)
+}
+
+// MakeSlice creates a new zero-initialized slice value of t with len and cap.
+func (a *Allocator) MakeSlice(t reflect.Type, len, cap int) reflect.Value {
+	return a.makeSlice(a.pool, t, len, cap)
+}
+
+// MakeMap creates a new map with minimum size n.
+func (a *Allocator) MakeMap(t reflect.Type, n int) reflect.Value {
+	return a.makeMap(a.pool, t, n)
+}
+
+// MakeChan creates a new chan with buffer.
+func (a *Allocator) MakeChan(t reflect.Type, buffer int) reflect.Value {
+	return a.makeChan(a.pool, t, buffer)
+}
+
+// Clone recursively deep clones val to a new value with memory allocated from a.
+func (a *Allocator) Clone(val reflect.Value) reflect.Value {
+	return a.clone(val, true)
+}
+
+func (a *Allocator) clone(val reflect.Value, inCustomFunc bool) reflect.Value {
+	if !val.IsValid() {
+		return val
+	}
+
+	state := &cloneState{
+		allocator: a,
+	}
+
+	if inCustomFunc {
+		state.skipCustomFuncValue = val
+	}
+
+	return state.clone(val)
+}
+
+// CloneSlowly recursively deep clones val to a new value with memory allocated from a.
+// It marks all cloned values internally, thus it can clone values containing pointer cycles.
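+// This bookkeeping makes CloneSlowly slower than Clone, so prefer Clone when
+// the value is known to contain no pointer cycles.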
+func (a *Allocator) CloneSlowly(val reflect.Value) reflect.Value {
+	return a.cloneSlowly(val, true)
+}
+
+func (a *Allocator) cloneSlowly(val reflect.Value, inCustomFunc bool) reflect.Value {
+	if !val.IsValid() {
+		return val
+	}
+
+	state := &cloneState{
+		allocator: a,
+		visited:   visitMap{},
+		invalid:   invalidPointers{},
+	}
+
+	if inCustomFunc {
+		state.skipCustomFuncValue = val
+	}
+
+	cloned := state.clone(val)
+	state.fix(cloned)
+	return cloned
+}
+
+func (a *Allocator) loadStructType(t reflect.Type) (st structType) {
+	st, ok := a.lookupStructType(t)
+
+	if ok {
+		return
+	}
+
+	num := t.NumField()
+	zeroFields := make([]structFieldSize, 0, num)
+	pointerFields := make([]structFieldType, 0, num)
+
+	// Find pointer fields in depth-first order.
+	for i := 0; i < num; i++ {
+		field := t.Field(i)
+		ft := field.Type
+		k := ft.Kind()
+		tag := field.Tag.Get(fieldTagName)
+
+		if tag == fieldTagValueSkip || tag == fieldTagValueSkipAlias {
+			zeroFields = append(zeroFields, structFieldSize{
+				Offset: field.Offset,
+				Size:   uintptr(ft.Size()),
+			})
+			continue
+		}
+
+		if tag == fieldTagValueShadowCopy || a.isScalar(k) {
+			continue
+		}
+
+		switch k {
+		case reflect.Array:
+			if ft.Len() == 0 {
+				continue
+			}
+
+			elem := ft.Elem()
+
+			if a.isScalar(elem.Kind()) {
+				continue
+			}
+
+			if elem.Kind() == reflect.Struct {
+				if fst := a.loadStructType(elem); fst.CanShadowCopy() {
+					continue
+				}
+			}
+		case reflect.Struct:
+			if fst := a.loadStructType(ft); fst.CanShadowCopy() {
+				continue
+			}
+		}
+
+		pointerFields = append(pointerFields, structFieldType{
+			Offset: field.Offset,
+			Index:  i,
+		})
+	}
+
+	st = structType{}
+
+	if len(zeroFields) != 0 {
+		st.ZeroFields = append(st.ZeroFields, zeroFields...)
+	}
+
+	if len(pointerFields) != 0 {
+		st.PointerFields = append(st.PointerFields, pointerFields...)
+	}
+
+	// Load custom function.
+	current := a
+
+	for current != nil {
+		if fn, ok := current.cachedCustomFuncTypes.Load(t); ok {
+			st.fn = fn.(Func)
+			break
+		}
+
+		current = current.parent
+	}
+
+	a.cachedStructTypes.LoadOrStore(t, st)
+	return
+}
+
+func (a *Allocator) lookupStructType(t reflect.Type) (st structType, ok bool) {
+	var v interface{}
+	current := a
+
+	for current != nil {
+		v, ok = current.cachedStructTypes.Load(t)
+
+		if ok {
+			st = v.(structType)
+			return
+		}
+
+		current = current.parent
+	}
+
+	return
+}
+
+func (a *Allocator) isOpaquePointer(t reflect.Type) (ok bool) {
+	current := a
+
+	for current != nil {
+		if _, ok = current.cachedPointerTypes.Load(t); ok {
+			return
+		}
+
+		current = current.parent
+	}
+
+	return
+}
+
+// MarkAsScalar marks t as a scalar type so that all clone methods will copy t by value.
+// If t is not struct or pointer to struct, MarkAsScalar ignores t.
+//
+// In most cases, it's not necessary to call it explicitly.
+// If a struct type contains scalar type fields only, the struct will be marked as scalar automatically.
+//
+// Here is a list of types marked as scalar by default:
+// - time.Time
+// - reflect.Value
+func (a *Allocator) MarkAsScalar(t reflect.Type) {
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	if t.Kind() != reflect.Struct {
+		return
+	}
+
+	a.cachedStructTypes.Store(t, zeroStructType)
+}
+
+// MarkAsOpaquePointer marks t as an opaque pointer so that all clone methods will copy t by value.
+// If t is not a pointer, MarkAsOpaquePointer ignores t.
+//
+// Here is a list of types marked as opaque pointers by default:
+// - `elliptic.Curve`, which is `*elliptic.CurveParams` or `elliptic.p256Curve`;
+// - `reflect.Type`, which is `*reflect.rtype` defined in `runtime`.
+func (a *Allocator) MarkAsOpaquePointer(t reflect.Type) {
+	if t.Kind() != reflect.Ptr {
+		return
+	}
+
+	a.cachedPointerTypes.Store(t, struct{}{})
+}
+
+// SetCustomFunc sets a custom clone function for type t.
+// If t is not a struct or a pointer to struct, SetCustomFunc ignores t.
+//
+// If fn is nil, the custom clone function for type t is removed.
+func (a *Allocator) SetCustomFunc(t reflect.Type, fn Func) {
+	if fn == nil {
+		a.cachedCustomFuncTypes.Delete(t)
+		return
+	}
+
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	if t.Kind() != reflect.Struct {
+		return
+	}
+
+	a.cachedCustomFuncTypes.Store(t, fn)
+}
+
+func heapNew(pool unsafe.Pointer, t reflect.Type) reflect.Value {
+	return reflect.New(t)
+}
+
+func heapMakeSlice(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value {
+	return reflect.MakeSlice(t, len, cap)
+}
+
+func heapMakeMap(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value {
+	return reflect.MakeMapWithSize(t, n)
+}
+
+func heapMakeChan(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value {
+	return reflect.MakeChan(t, buffer)
+}
diff --git a/vendor/github.com/huandu/go-clone/allocatormethods.go b/vendor/github.com/huandu/go-clone/allocatormethods.go
new file mode 100644
index 000000000..08a42ff2e
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/allocatormethods.go
@@ -0,0 +1,115 @@
+// Copyright 2023 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package clone
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// AllocatorMethods defines all methods required by an allocator.
+// If any of these methods is nil, the allocator falls back to a default method
+// which allocates memory from the heap.
+type AllocatorMethods struct {
+	// Parent is the allocator which handles all unhandled methods.
+	// If it's nil, it will be the default allocator.
+ Parent *Allocator + + New func(pool unsafe.Pointer, t reflect.Type) reflect.Value + MakeSlice func(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value + MakeMap func(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value + MakeChan func(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value + IsScalar func(k reflect.Kind) bool +} + +func (am *AllocatorMethods) parent() *Allocator { + if am != nil && am.Parent != nil { + return am.Parent + } + + return nil +} + +func (am *AllocatorMethods) new(parent *Allocator, pool unsafe.Pointer) func(pool unsafe.Pointer, t reflect.Type) reflect.Value { + if am != nil && am.New != nil { + return am.New + } + + if parent != nil { + if parent.pool == pool { + return parent.new + } else { + return func(pool unsafe.Pointer, t reflect.Type) reflect.Value { + return parent.New(t) + } + } + } + + return defaultAllocator.new +} + +func (am *AllocatorMethods) makeSlice(parent *Allocator, pool unsafe.Pointer) func(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value { + if am != nil && am.MakeSlice != nil { + return am.MakeSlice + } + + if parent != nil { + if parent.pool == pool { + return parent.makeSlice + } else { + return func(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value { + return parent.MakeSlice(t, len, cap) + } + } + } + + return defaultAllocator.makeSlice +} + +func (am *AllocatorMethods) makeMap(parent *Allocator, pool unsafe.Pointer) func(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value { + if am != nil && am.MakeMap != nil { + return am.MakeMap + } + + if parent != nil { + if parent.pool == pool { + return parent.makeMap + } else { + return func(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value { + return parent.MakeMap(t, n) + } + } + } + + return defaultAllocator.makeMap +} + +func (am *AllocatorMethods) makeChan(parent *Allocator, pool unsafe.Pointer) func(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value { + if am != nil && am.MakeChan != nil { + return am.MakeChan + } + + if parent != nil { + if parent.pool == pool { + return parent.makeChan + } else { + return func(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value { + return parent.MakeChan(t, buffer) + } + } + } + + return defaultAllocator.makeChan +} + +func (am *AllocatorMethods) isScalar(parent *Allocator) func(t reflect.Kind) bool { + if am != nil && am.IsScalar != nil { + return am.IsScalar + } + + if parent != nil { + return parent.isScalar + } + + return defaultAllocator.isScalar +} diff --git a/vendor/github.com/huandu/go-clone/arena.go b/vendor/github.com/huandu/go-clone/arena.go new file mode 100644 index 000000000..d96bfca69 --- /dev/null +++ b/vendor/github.com/huandu/go-clone/arena.go @@ -0,0 +1,9 @@ +// Copyright 2023 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +//go:build !(go1.20 && goexperiment.arenas) +// +build !go1.20 !goexperiment.arenas + +package clone + +const arenaIsEnabled = false diff --git a/vendor/github.com/huandu/go-clone/arena_go120.go b/vendor/github.com/huandu/go-clone/arena_go120.go new file mode 100644 index 000000000..8c449be5c --- /dev/null +++ b/vendor/github.com/huandu/go-clone/arena_go120.go @@ -0,0 +1,9 @@ +// Copyright 2023 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. 
+
+//go:build go1.20 && goexperiment.arenas
+// +build go1.20,goexperiment.arenas
+
+package clone
+
+const arenaIsEnabled = true
diff --git a/vendor/github.com/huandu/go-clone/atomic_go119.go b/vendor/github.com/huandu/go-clone/atomic_go119.go
new file mode 100644
index 000000000..e674a56ce
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/atomic_go119.go
@@ -0,0 +1,81 @@
+// Copyright 2019 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package clone
+
+import (
+	"reflect"
+	"sync/atomic"
+)
+
+func init() {
+	SetCustomFunc(reflect.TypeOf(atomic.Bool{}), func(allocator *Allocator, old, new reflect.Value) {
+		if !old.CanAddr() {
+			return
+		}
+
+		// Clone value inside atomic.Bool.
+		oldValue := old.Addr().Interface().(*atomic.Bool)
+		newValue := new.Addr().Interface().(*atomic.Bool)
+		v := oldValue.Load()
+		newValue.Store(v)
+	})
+	SetCustomFunc(reflect.TypeOf(atomic.Int32{}), func(allocator *Allocator, old, new reflect.Value) {
+		if !old.CanAddr() {
+			return
+		}
+
+		// Clone value inside atomic.Int32.
+		oldValue := old.Addr().Interface().(*atomic.Int32)
+		newValue := new.Addr().Interface().(*atomic.Int32)
+		v := oldValue.Load()
+		newValue.Store(v)
+	})
+	SetCustomFunc(reflect.TypeOf(atomic.Int64{}), func(allocator *Allocator, old, new reflect.Value) {
+		if !old.CanAddr() {
+			return
+		}
+
+		// Clone value inside atomic.Int64.
+		oldValue := old.Addr().Interface().(*atomic.Int64)
+		newValue := new.Addr().Interface().(*atomic.Int64)
+		v := oldValue.Load()
+		newValue.Store(v)
+	})
+	SetCustomFunc(reflect.TypeOf(atomic.Uint32{}), func(allocator *Allocator, old, new reflect.Value) {
+		if !old.CanAddr() {
+			return
+		}
+
+		// Clone value inside atomic.Uint32.
+		oldValue := old.Addr().Interface().(*atomic.Uint32)
+		newValue := new.Addr().Interface().(*atomic.Uint32)
+		v := oldValue.Load()
+		newValue.Store(v)
+	})
+	SetCustomFunc(reflect.TypeOf(atomic.Uint64{}), func(allocator *Allocator, old, new reflect.Value) {
+		if !old.CanAddr() {
+			return
+		}
+
+		// Clone value inside atomic.Uint64.
+		oldValue := old.Addr().Interface().(*atomic.Uint64)
+		newValue := new.Addr().Interface().(*atomic.Uint64)
+		v := oldValue.Load()
+		newValue.Store(v)
+	})
+	SetCustomFunc(reflect.TypeOf(atomic.Uintptr{}), func(allocator *Allocator, old, new reflect.Value) {
+		if !old.CanAddr() {
+			return
+		}
+
+		// Clone value inside atomic.Uintptr.
+		oldValue := old.Addr().Interface().(*atomic.Uintptr)
+		newValue := new.Addr().Interface().(*atomic.Uintptr)
+		v := oldValue.Load()
+		newValue.Store(v)
+	})
+}
diff --git a/vendor/github.com/huandu/go-clone/clone.go b/vendor/github.com/huandu/go-clone/clone.go
new file mode 100644
index 000000000..18a3c73c7
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/clone.go
@@ -0,0 +1,891 @@
+// Copyright 2019 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+// Package clone provides functions to deep clone any Go data.
+// It also provides a wrapper to protect a pointer from any unexpected mutation.
+package clone
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+)
+
+var heapCloneState = &cloneState{
+	allocator: defaultAllocator,
+}
+var cloner = MakeCloner(defaultAllocator)
+
+const zeroBytesCount = 256
+
+var zeroBytes [zeroBytesCount]byte
+var zero = zeroBytes[:]
+
+// Clone recursively deep clones v to a new value in the heap.
+// It assumes that there is no pointer cycle in v,
+// e.g. v has a pointer that points to v itself.
+// If there is a pointer cycle, use Slowly instead.
+//
+// Clone allocates memory and deeply copies values inside v in depth-first sequence.
+// There are a few special rules for the following types.
+//
+// - Scalar types: all number-like types are copied by value.
+// - func: Copied by value as func is an opaque pointer at runtime.
+// - string: Copied by value as string is immutable by design.
+// - unsafe.Pointer: Copied by value as we don't know what's in it.
+// - chan: A new empty chan is created as we cannot read data inside the old chan.
+//
+// Unlike many other packages, Clone is able to clone unexported fields of any struct.
+// Use this feature wisely.
+func Clone(v interface{}) interface{} {
+	return cloner.Clone(v)
+}
+
+func clone(allocator *Allocator, v interface{}) interface{} {
+	if v == nil {
+		return nil
+	}
+
+	val := reflect.ValueOf(v)
+	cloned := allocator.clone(val, false)
+	return cloned.Interface()
+}
+
+// Slowly recursively deep clones v to a new value in the heap.
+// It marks all cloned values internally, thus it can clone values containing pointer cycles.
+//
+// Slowly works exactly the same as Clone. See the Clone doc for more details.
func Slowly(v interface{}) interface{} {
+	return cloner.CloneSlowly(v)
+}
+
+func cloneSlowly(allocator *Allocator, v interface{}) interface{} {
+	if v == nil {
+		return nil
+	}
+
+	val := reflect.ValueOf(v)
+	cloned := allocator.cloneSlowly(val, false)
+	return cloned.Interface()
+}
+
+type cloneState struct {
+	allocator *Allocator
+	visited   visitMap
+	invalid   invalidPointers
+
+	// The value that should not be cloned by a custom func.
+	// It's useful to avoid an infinite loop when a custom func calls allocator.Clone().
+	skipCustomFuncValue reflect.Value
+}
+
+type visit struct {
+	p     uintptr
+	extra int
+	t     reflect.Type
+}
+
+type visitMap map[visit]reflect.Value
+type invalidPointers map[visit]reflect.Value
+
+func (state *cloneState) clone(v reflect.Value) reflect.Value {
+	if state.allocator.isScalar(v.Kind()) {
+		return copyScalarValue(v)
+	}
+
+	switch v.Kind() {
+	case reflect.Array:
+		return state.cloneArray(v)
+	case reflect.Chan:
+		return state.allocator.MakeChan(v.Type(), v.Cap())
+	case reflect.Interface:
+		return state.cloneInterface(v)
+	case reflect.Map:
+		return state.cloneMap(v)
+	case reflect.Ptr:
+		return state.clonePtr(v)
+	case reflect.Slice:
+		return state.cloneSlice(v)
+	case reflect.Struct:
+		return state.cloneStruct(v)
+	case reflect.String:
+		return state.cloneString(v)
+	default:
+		panic(fmt.Errorf("go-clone: unsupported type `%v`", v.Type()))
+	}
+}
+
+func (state *cloneState) cloneArray(v reflect.Value) reflect.Value {
+	dst := state.allocator.New(v.Type())
+	state.copyArray(v, dst)
+	return dst.Elem()
+}
+
+func (state *cloneState) copyArray(src, nv reflect.Value) {
+	p := unsafe.Pointer(nv.Pointer()) // dst must be a Ptr.
+ dst := nv.Elem() + num := src.Len() + + if state.allocator.isScalar(src.Type().Elem().Kind()) { + shadowCopy(src, p) + return + } + + for i := 0; i < num; i++ { + dst.Index(i).Set(state.clone(src.Index(i))) + } +} + +func (state *cloneState) cloneInterface(v reflect.Value) reflect.Value { + if v.IsNil() { + return reflect.Zero(v.Type()) + } + + t := v.Type() + elem := v.Elem() + return state.clone(elem).Convert(elem.Type()).Convert(t) +} + +func (state *cloneState) cloneMap(v reflect.Value) reflect.Value { + if v.IsNil() { + return reflect.Zero(v.Type()) + } + + t := v.Type() + + if state.visited != nil { + vst := visit{ + p: v.Pointer(), + t: t, + } + + if val, ok := state.visited[vst]; ok { + return val + } + } + + nv := state.allocator.MakeMap(t, v.Len()) + + if state.visited != nil { + vst := visit{ + p: v.Pointer(), + t: t, + } + state.visited[vst] = nv + } + + for iter := mapIter(v); iter.Next(); { + key := state.clone(iter.Key()) + value := state.clone(iter.Value()) + nv.SetMapIndex(key, value) + } + + return nv +} + +func (state *cloneState) clonePtr(v reflect.Value) reflect.Value { + if v.IsNil() { + return reflect.Zero(v.Type()) + } + + t := v.Type() + + if state.allocator.isOpaquePointer(t) { + if v.CanInterface() { + return v + } + + ptr := state.allocator.New(t) + p := unsafe.Pointer(ptr.Pointer()) + shadowCopy(v, p) + return ptr.Elem() + } + + if state.visited != nil { + vst := visit{ + p: v.Pointer(), + t: t, + } + + if val, ok := state.visited[vst]; ok { + return val + } + } + + src := v.Elem() + elemType := src.Type() + elemKind := src.Kind() + nv := state.allocator.New(elemType) + + if state.visited != nil { + vst := visit{ + p: v.Pointer(), + t: t, + } + state.visited[vst] = nv + } + + switch elemKind { + case reflect.Struct: + state.copyStruct(src, nv) + case reflect.Array: + state.copyArray(src, nv) + default: + nv.Elem().Set(state.clone(src)) + } + + // If this pointer is the address of a struct field and it's a cycle pointer, + // it may be updated. + if state.visited != nil { + vst := visit{ + p: v.Pointer(), + t: t, + } + nv = state.visited[vst] + } + + return nv +} + +func (state *cloneState) cloneSlice(v reflect.Value) reflect.Value { + if v.IsNil() { + return reflect.Zero(v.Type()) + } + + t := v.Type() + num := v.Len() + + if state.visited != nil { + vst := visit{ + p: v.Pointer(), + extra: num, + t: t, + } + + if val, ok := state.visited[vst]; ok { + return val + } + } + + c := v.Cap() + nv := state.allocator.MakeSlice(t, num, c) + + if state.visited != nil { + vst := visit{ + p: v.Pointer(), + extra: num, + t: t, + } + state.visited[vst] = nv + } + + // For scalar slice, copy underlying values directly. + if state.allocator.isScalar(t.Elem().Kind()) { + src := unsafe.Pointer(v.Pointer()) + dst := unsafe.Pointer(nv.Pointer()) + sz := int(t.Elem().Size()) + l := num * sz + cc := c * sz + copy((*[maxByteSize]byte)(dst)[:l:cc], (*[maxByteSize]byte)(src)[:l:cc]) + } else { + for i := 0; i < num; i++ { + nv.Index(i).Set(state.clone(v.Index(i))) + } + } + + return nv +} + +func (state *cloneState) cloneStruct(v reflect.Value) reflect.Value { + t := v.Type() + nv := state.allocator.New(t) + state.copyStruct(v, nv) + return nv.Elem() +} + +var typeOfByteSlice = reflect.TypeOf([]byte(nil)) + +func (state *cloneState) cloneString(v reflect.Value) reflect.Value { + t := v.Type() + l := v.Len() + data := state.allocator.MakeSlice(typeOfByteSlice, l, l) + + // The v is an unexported struct field. 
+ if !v.CanInterface() { + v = reflect.ValueOf(v.String()) + } + + reflect.Copy(data, v) + + nv := state.allocator.New(t) + slice := data.Interface().([]byte) + *(*stringHeader)(unsafe.Pointer(nv.Pointer())) = *(*stringHeader)(unsafe.Pointer(&slice)) + + return nv.Elem() +} + +func (state *cloneState) copyStruct(src, nv reflect.Value) { + t := src.Type() + st := state.allocator.loadStructType(t) + ptr := unsafe.Pointer(nv.Pointer()) + + if st.Init(state.allocator, src, nv, state.skipCustomFuncValue == src) { + return + } + + for _, pf := range st.ZeroFields { + p := unsafe.Pointer(uintptr(ptr) + pf.Offset) + sz := pf.Size + + for sz > zeroBytesCount { + copy((*[zeroBytesCount]byte)(p)[:zeroBytesCount:zeroBytesCount], zero) + sz -= zeroBytesCount + p = unsafe.Pointer(uintptr(p) + zeroBytesCount) + } + + copy((*[zeroBytesCount]byte)(p)[:sz:sz], zero) + } + + for _, pf := range st.PointerFields { + i := int(pf.Index) + p := unsafe.Pointer(uintptr(ptr) + pf.Offset) + field := src.Field(i) + + // This field can be referenced by a pointer or interface inside itself. + // Put the pointer to this field to visited to avoid any error. + // + // See https://github.com/huandu/go-clone/issues/3. + if state.visited != nil && field.CanAddr() { + ft := field.Type() + fp := field.Addr().Pointer() + vst := visit{ + p: fp, + t: reflect.PtrTo(ft), + } + nv := reflect.NewAt(ft, p) + + // The address of this field was visited, so fp must be a cycle pointer. + // As this field is not fully cloned, the val stored in visited[visit] must be wrong. + // It must be replaced by nv which will be the right value (it's incomplete right now). + // + // Unfortunately, if the val was used by previous clone routines, + // there is no easy way to fix wrong values - all pointers must be traversed and fixed. 
+			if val, ok := state.visited[vst]; ok {
+				state.invalid[visit{
+					p: val.Pointer(),
+					t: vst.t,
+				}] = nv
+			}
+
+			state.visited[vst] = nv
+		}
+
+		v := state.clone(field)
+		shadowCopy(v, p)
+	}
+}
+
+var typeOfString = reflect.TypeOf("")
+
+func shadowCopy(src reflect.Value, p unsafe.Pointer) {
+	switch src.Kind() {
+	case reflect.Bool:
+		*(*bool)(p) = src.Bool()
+	case reflect.Int:
+		*(*int)(p) = int(src.Int())
+	case reflect.Int8:
+		*(*int8)(p) = int8(src.Int())
+	case reflect.Int16:
+		*(*int16)(p) = int16(src.Int())
+	case reflect.Int32:
+		*(*int32)(p) = int32(src.Int())
+	case reflect.Int64:
+		*(*int64)(p) = src.Int()
+	case reflect.Uint:
+		*(*uint)(p) = uint(src.Uint())
+	case reflect.Uint8:
+		*(*uint8)(p) = uint8(src.Uint())
+	case reflect.Uint16:
+		*(*uint16)(p) = uint16(src.Uint())
+	case reflect.Uint32:
+		*(*uint32)(p) = uint32(src.Uint())
+	case reflect.Uint64:
+		*(*uint64)(p) = src.Uint()
+	case reflect.Uintptr:
+		*(*uintptr)(p) = uintptr(src.Uint())
+	case reflect.Float32:
+		*(*float32)(p) = float32(src.Float())
+	case reflect.Float64:
+		*(*float64)(p) = src.Float()
+	case reflect.Complex64:
+		*(*complex64)(p) = complex64(src.Complex())
+	case reflect.Complex128:
+		*(*complex128)(p) = src.Complex()
+
+	case reflect.Array:
+		t := src.Type()
+
+		if src.CanAddr() {
+			srcPtr := unsafe.Pointer(src.UnsafeAddr())
+			sz := t.Size()
+			copy((*[maxByteSize]byte)(p)[:sz:sz], (*[maxByteSize]byte)(srcPtr)[:sz:sz])
+			return
+		}
+
+		val := reflect.NewAt(t, p).Elem()
+
+		if src.CanInterface() {
+			val.Set(src)
+			return
+		}
+
+		sz := t.Elem().Size()
+		num := src.Len()
+
+		for i := 0; i < num; i++ {
+			elemPtr := unsafe.Pointer(uintptr(p) + uintptr(i)*sz)
+			shadowCopy(src.Index(i), elemPtr)
+		}
+	case reflect.Chan:
+		*((*uintptr)(p)) = src.Pointer()
+	case reflect.Func:
+		t := src.Type()
+		src = copyScalarValue(src)
+		val := reflect.NewAt(t, p).Elem()
+		val.Set(src)
+	case reflect.Interface:
+		*((*interfaceData)(p)) = parseReflectValue(src)
+	case reflect.Map:
+		*((*uintptr)(p)) = src.Pointer()
+	case reflect.Ptr:
+		*((*uintptr)(p)) = src.Pointer()
+	case reflect.Slice:
+		*(*sliceHeader)(p) = sliceHeader{
+			Data: src.Pointer(),
+			Len:  src.Len(),
+			Cap:  src.Cap(),
+		}
+	case reflect.String:
+		s := src.String()
+		val := reflect.NewAt(typeOfString, p).Elem()
+		val.SetString(s)
+	case reflect.Struct:
+		t := src.Type()
+		val := reflect.NewAt(t, p).Elem()
+
+		if src.CanInterface() {
+			val.Set(src)
+			return
+		}
+
+		num := t.NumField()
+
+		for i := 0; i < num; i++ {
+			field := t.Field(i)
+			fieldPtr := unsafe.Pointer(uintptr(p) + field.Offset)
+			shadowCopy(src.Field(i), fieldPtr)
+		}
+	case reflect.UnsafePointer:
+		// There is no way to copy an unsafe.Pointer value.
+		*((*uintptr)(p)) = src.Pointer()
+
+	default:
+		panic(fmt.Errorf("go-clone: impossible type `%v` when cloning private field", src.Type()))
+	}
+}
+
+// fix traverses v to update all pointer values in state.invalid.
+func (state *cloneState) fix(v reflect.Value) {
+	if state == nil || len(state.invalid) == 0 {
+		return
+	}
+
+	fix := &fixState{
+		allocator: state.allocator,
+		fixed:     fixMap{},
+		invalid:   state.invalid,
+	}
+	fix.fix(v)
+}
+
+type fixState struct {
+	allocator *Allocator
+	fixed     fixMap
+	invalid   invalidPointers
+}
+
+type fixMap map[visit]struct{}
+
+func (fix *fixState) new(t reflect.Type) reflect.Value {
+	return fix.allocator.New(t)
+}
+
+func (fix *fixState) fix(v reflect.Value) (copied reflect.Value, changed int) {
+	if fix.allocator.isScalar(v.Kind()) {
+		return
+	}
+
+	switch v.Kind() {
+	case reflect.Array:
+		return fix.fixArray(v)
+	case reflect.Chan:
+		// Do nothing.
+		return
+	case reflect.Interface:
+		return fix.fixInterface(v)
+	case reflect.Map:
+		return fix.fixMap(v)
+	case reflect.Ptr:
+		return fix.fixPtr(v)
+	case reflect.Slice:
+		return fix.fixSlice(v)
+	case reflect.Struct:
+		return fix.fixStruct(v)
+	case reflect.String:
+		// Do nothing.
+		return
+	default:
+		panic(fmt.Errorf("go-clone: unsupported type `%v`", v.Type()))
+	}
+}
+
+func (fix *fixState) fixArray(v reflect.Value) (copied reflect.Value, changed int) {
+	t := v.Type()
+	et := t.Elem()
+	kind := et.Kind()
+
+	if fix.allocator.isScalar(kind) {
+		return
+	}
+
+	l := v.Len()
+
+	for i := 0; i < l; i++ {
+		elem := v.Index(i)
+
+		if kind == reflect.Ptr {
+			vst := visit{
+				p: elem.Pointer(),
+				t: et,
+			}
+
+			if nv, ok := fix.invalid[vst]; ok {
+				// If elem cannot be set, v must be copied to make it settable.
+				// Don't do it unless there is no other choice.
+				if !elem.CanSet() {
+					copied = fix.new(t).Elem()
+					shadowCopy(v, unsafe.Pointer(copied.Addr().Pointer()))
+					_, changed = fix.fixArray(copied)
+					return
+				}
+
+				elem.Set(nv)
+				changed++
+				continue
+			}
+		}
+
+		fixed, c := fix.fix(elem)
+		changed += c
+
+		if fixed.IsValid() {
+			// If elem cannot be set, v must be copied to make it settable.
+			// Don't do it unless there is no other choice.
+ if !elem.CanSet() { + copied = fix.new(t).Elem() + shadowCopy(v, unsafe.Pointer(copied.Addr().Pointer())) + _, changed = fix.fixArray(copied) + return + } + + elem.Set(fixed) + } + } + + return +} + +func (fix *fixState) fixInterface(v reflect.Value) (copied reflect.Value, changed int) { + if v.IsNil() { + return + } + + elem := v.Elem() + t := elem.Type() + kind := elem.Kind() + + if kind == reflect.Ptr { + vst := visit{ + p: elem.Pointer(), + t: t, + } + + if nv, ok := fix.invalid[vst]; ok { + copied = nv.Convert(v.Type()) + changed++ + return + } + } + + copied, changed = fix.fix(elem) + + if copied.IsValid() { + copied = copied.Convert(v.Type()) + } + + return +} + +func (fix *fixState) fixMap(v reflect.Value) (copied reflect.Value, changed int) { + if v.IsNil() { + return + } + + t := v.Type() + vst := visit{ + p: v.Pointer(), + t: t, + } + + if _, ok := fix.fixed[vst]; ok { + return + } + + fix.fixed[vst] = struct{}{} + + kt := t.Key() + et := t.Elem() + keyKind := kt.Kind() + elemKind := et.Kind() + + if isScalar := fix.allocator.isScalar; isScalar(keyKind) && isScalar(elemKind) { + return + } + + invalidKeys := map[reflect.Value][2]reflect.Value{} + + for iter := mapIter(v); iter.Next(); { + key := iter.Key() + elem := iter.Value() + var fixed reflect.Value + c := 0 + + if elemKind == reflect.Ptr { + vst := visit{ + p: elem.Pointer(), + t: et, + } + + if nv, ok := fix.invalid[vst]; ok { + fixed = nv + c++ + } else { + fixed, c = fix.fixPtr(elem) + } + } else { + fixed, c = fix.fix(elem) + } + + changed += c + c = 0 + + if fixed.IsValid() { + v = forceSetMapIndex(v, key, fixed) + elem = fixed + fixed = reflect.Value{} + } + + if keyKind == reflect.Ptr { + vst := visit{ + p: key.Pointer(), + t: kt, + } + + if nv, ok := fix.invalid[vst]; ok { + fixed = nv + c++ + } else { + fixed, c = fix.fixPtr(key) + } + } else { + fixed, c = fix.fix(key) + } + + changed += c + + // Key cannot be changed immediately inside map range iteration. + // Do it later. 
+		if fixed.IsValid() {
+			invalidKeys[key] = [2]reflect.Value{fixed, elem}
+		}
+	}
+
+	for key, kv := range invalidKeys {
+		v = forceSetMapIndex(v, key, reflect.Value{})
+		v = forceSetMapIndex(v, kv[0], kv[1])
+	}
+
+	return
+}
+
+func forceSetMapIndex(v, key, elem reflect.Value) (nv reflect.Value) {
+	nv = v
+
+	if !v.CanInterface() {
+		nv = forceClearROFlag(v)
+	}
+
+	if !key.CanInterface() {
+		key = forceClearROFlag(key)
+	}
+
+	if elem.IsValid() && !elem.CanInterface() {
+		elem = forceClearROFlag(elem)
+	}
+
+	nv.SetMapIndex(key, elem)
+	return
+}
+
+func (fix *fixState) fixPtr(v reflect.Value) (copied reflect.Value, changed int) {
+	if v.IsNil() {
+		return
+	}
+
+	vst := visit{
+		p: v.Pointer(),
+		t: v.Type(),
+	}
+
+	if _, ok := fix.invalid[vst]; ok {
+		panic(fmt.Errorf("go-clone: invalid pointers must have been fixed in other methods"))
+	}
+
+	if _, ok := fix.fixed[vst]; ok {
+		return
+	}
+
+	fix.fixed[vst] = struct{}{}
+
+	elem := v.Elem()
+	_, changed = fix.fix(elem)
+	return
+}
+
+func (fix *fixState) fixSlice(v reflect.Value) (copied reflect.Value, changed int) {
+	if v.IsNil() {
+		return
+	}
+
+	t := v.Type()
+	et := t.Elem()
+	kind := et.Kind()
+
+	if fix.allocator.isScalar(kind) {
+		return
+	}
+
+	l := v.Len()
+	p := unsafe.Pointer(v.Pointer())
+	vst := visit{
+		p:     uintptr(p),
+		extra: l,
+		t:     t,
+	}
+
+	if _, ok := fix.fixed[vst]; ok {
+		return
+	}
+
+	fix.fixed[vst] = struct{}{}
+
+	for i := 0; i < l; i++ {
+		elem := v.Index(i)
+		var fixed reflect.Value
+		c := 0
+
+		if kind == reflect.Ptr {
+			vst := visit{
+				p: elem.Pointer(),
+				t: et,
+			}
+
+			if nv, ok := fix.invalid[vst]; ok {
+				fixed = nv
+			} else {
+				fixed, c = fix.fixPtr(elem)
+			}
+		} else {
+			fixed, c = fix.fix(elem)
+		}
+
+		changed += c
+
+		if fixed.IsValid() {
+			sz := et.Size()
+			elemPtr := unsafe.Pointer(uintptr(p) + sz*uintptr(i))
+			shadowCopy(fixed, elemPtr)
+		}
+	}
+
+	return
+}
+
+func (fix *fixState) fixStruct(v reflect.Value) (copied reflect.Value, changed int) {
+	t := v.Type()
+	st := fix.allocator.loadStructType(t)
+
+	if len(st.PointerFields) == 0 {
+		return
+	}
+
+	for _, pf := range st.PointerFields {
+		i := int(pf.Index)
+		field := v.Field(i)
+
+		ft := field.Type()
+
+		if ft.Kind() == reflect.Ptr {
+			vst := visit{
+				p: field.Pointer(),
+				t: ft,
+			}
+
+			if nv, ok := fix.invalid[vst]; ok {
+				// If v is not addressable, a new struct must be allocated.
+				// Don't do it unless there is no other choice.
+				if !v.CanAddr() {
+					copied = fix.new(t).Elem()
+					shadowCopy(v, unsafe.Pointer(copied.Addr().Pointer()))
+					_, changed = fix.fixStruct(copied)
+					return
+				}
+
+				ptr := unsafe.Pointer(v.Addr().Pointer())
+				p := unsafe.Pointer(uintptr(ptr) + pf.Offset)
+				shadowCopy(nv, p)
+				continue
+			}
+		}
+
+		fixed, c := fix.fix(field)
+		changed += c
+
+		if fixed.IsValid() {
+			// If v is not addressable, a new struct must be allocated.
+			// Don't do it unless there is no other choice.
+			if !v.CanAddr() {
+				copied = fix.new(t).Elem()
+				shadowCopy(v, unsafe.Pointer(copied.Addr().Pointer()))
+				_, changed = fix.fixStruct(copied)
+				return
+			}
+
+			ptr := unsafe.Pointer(v.Addr().Pointer())
+			p := unsafe.Pointer(uintptr(ptr) + pf.Offset)
+			shadowCopy(fixed, p)
+		}
+	}
+
+	return
+}
diff --git a/vendor/github.com/huandu/go-clone/cloner.go b/vendor/github.com/huandu/go-clone/cloner.go
new file mode 100644
index 000000000..3ade7e10a
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/cloner.go
@@ -0,0 +1,27 @@
+// Copyright 2023 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package clone
+
+// Cloner implements the clone API with a given allocator.
+type Cloner struct {
+	allocator *Allocator
+}
+
+// MakeCloner creates a cloner with the given allocator.
+func MakeCloner(allocator *Allocator) Cloner {
+	return Cloner{
+		allocator: allocator,
+	}
+}
+
+// Clone clones v with the given allocator.
+func (c Cloner) Clone(v interface{}) interface{} {
+	return clone(c.allocator, v)
+}
+
+// CloneSlowly clones v with the given allocator.
+// It can clone values containing pointer cycles.
+func (c Cloner) CloneSlowly(v interface{}) interface{} {
+	return cloneSlowly(c.allocator, v)
+}
diff --git a/vendor/github.com/huandu/go-clone/headers.go b/vendor/github.com/huandu/go-clone/headers.go
new file mode 100644
index 000000000..55c800049
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/headers.go
@@ -0,0 +1,12 @@
+// Copyright 2019 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package clone
+
+import "reflect"
+
+// As golint reports a warning on possible misuse of these headers,
+// avoid using these header types directly to silence golint.
+
+type sliceHeader reflect.SliceHeader
+type stringHeader reflect.StringHeader
diff --git a/vendor/github.com/huandu/go-clone/interfacedata.go b/vendor/github.com/huandu/go-clone/interfacedata.go
new file mode 100644
index 000000000..076b7f464
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/interfacedata.go
@@ -0,0 +1,50 @@
+package clone
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const sizeOfPointers = unsafe.Sizeof((interface{})(0)) / unsafe.Sizeof(uintptr(0))
+
+// interfaceData is the underlying data of an interface.
+// As reflect.Value's InterfaceData method is deprecated,
+// it may be broken in any Go release.
+// It's better to create a custom type to hold the data.
+//
+// The types of the interfaceData fields must be pointers.
+// It's a way to cheat the Go compiler into generating calls to the write barrier
+// when copying interfaces.
+type interfaceData struct {
+	_ [sizeOfPointers]unsafe.Pointer
+}
+
+var reflectValuePtrOffset uintptr
+
+func init() {
+	t := reflect.TypeOf(reflect.Value{})
+	found := false
+	fields := t.NumField()
+
+	for i := 0; i < fields; i++ {
+		field := t.Field(i)
+
+		if field.Type.Kind() == reflect.UnsafePointer {
+			found = true
+			reflectValuePtrOffset = field.Offset
+			break
+		}
+	}
+
+	if !found {
+		panic("go-clone: fail to find internal ptr field in reflect.Value")
+	}
+}
+
+// parseReflectValue returns the underlying interface data in a reflect value.
+// It assumes that v is an interface value.
+func parseReflectValue(v reflect.Value) interfaceData {
+	pv := (unsafe.Pointer)(uintptr(unsafe.Pointer(&v)) + reflectValuePtrOffset)
+	ptr := *(*unsafe.Pointer)(pv)
+	return *(*interfaceData)(ptr)
+}
diff --git a/vendor/github.com/huandu/go-clone/mapiter.go b/vendor/github.com/huandu/go-clone/mapiter.go
new file mode 100644
index 000000000..0c0463fc0
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/mapiter.go
@@ -0,0 +1,41 @@
+// Copyright 2019 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+// +build !go1.12
+
+package clone
+
+import (
+	"reflect"
+)
+
+type iter struct {
+	m    reflect.Value
+	k    reflect.Value
+	keys []reflect.Value
+}
+
+func mapIter(m reflect.Value) *iter {
+	return &iter{
+		m:    m,
+		keys: m.MapKeys(),
+	}
+}
+
+func (it *iter) Next() bool {
+	if len(it.keys) == 0 {
+		return false
+	}
+
+	it.k = it.keys[0]
+	it.keys = it.keys[1:]
+	return true
+}
+
+func (it *iter) Key() reflect.Value {
+	return it.k
+}
+
+func (it *iter) Value() reflect.Value {
+	return it.m.MapIndex(it.k)
+}
diff --git a/vendor/github.com/huandu/go-clone/mapiter_go112.go b/vendor/github.com/huandu/go-clone/mapiter_go112.go
new file mode 100644
index 000000000..f2e937c40
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/mapiter_go112.go
@@ -0,0 +1,14 @@
+// Copyright 2019 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+// +build go1.12
+
+package clone
+
+import (
+	"reflect"
+)
+
+func mapIter(m reflect.Value) *reflect.MapIter {
+	return m.MapRange()
+}
diff --git a/vendor/github.com/huandu/go-clone/memory.go b/vendor/github.com/huandu/go-clone/memory.go
new file mode 100644
index 000000000..a34b3528c
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/memory.go
@@ -0,0 +1,11 @@
+// Copyright 2019 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package clone
+
+// maxByteSize is a large enough value to cheat the Go compiler
+// when converting an unsafe address to []byte.
+// It's not actually used at runtime.
+//
+// The value 2^30 is the max value AFAIK to make the Go compiler happy on all archs.
+const maxByteSize = 1 << 30
diff --git a/vendor/github.com/huandu/go-clone/structtype.go b/vendor/github.com/huandu/go-clone/structtype.go
new file mode 100644
index 000000000..933dc9c2b
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/structtype.go
@@ -0,0 +1,311 @@
+// Copyright 2019 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package clone
+
+import (
+	"crypto/elliptic"
+	"fmt"
+	"reflect"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unsafe"
+)
+
+type structType struct {
+	ZeroFields    []structFieldSize
+	PointerFields []structFieldType
+	fn            Func
+}
+
+type structFieldSize struct {
+	Offset uintptr // The offset from the beginning of the struct.
+	Size   uintptr // The size of the field.
+}
+
+type structFieldType struct {
+	Offset uintptr // The offset from the beginning of the struct.
+	Index  int     // The index of the field.
+}
+
+var zeroStructType = structType{}
+
+func init() {
+	// Some well-known scalar-like structs.
+	MarkAsScalar(reflect.TypeOf(time.Time{}))
+	MarkAsScalar(reflect.TypeOf(reflect.Value{}))
+
+	// Special case for elliptic.Curve which is used by TLS ECC certificates.
+	// Package crypto/tls uses elliptic.Curve as enum values,
+	// so they should be treated as opaque pointers.
+	//
+	// As elliptic.Curve is an interface, it can be *elliptic.CurveParams or elliptic.p256Curve.
+	MarkAsOpaquePointer(reflect.TypeOf(&elliptic.CurveParams{}))
+	curves := []elliptic.Curve{
+		elliptic.P224(),
+		elliptic.P256(),
+		elliptic.P384(),
+		elliptic.P521(),
+	}
+
+	for _, curve := range curves {
+		MarkAsOpaquePointer(reflect.ValueOf(curve).Type())
+	}
+
+	// Special case for reflect.Type (actually *reflect.rtype):
+	// The *reflect.rtype should not be copied as it is immutable and
+	// may point to a variable whose actual type is not reflect.rtype,
+	// e.g. *reflect.arrayType or *reflect.chanType.
+	MarkAsOpaquePointer(reflect.TypeOf(reflect.TypeOf(0)))
+
+	// Some well-known no-copy structs.
+	//
+	// Custom funcs are set for almost all structs defined in packages "sync" and "sync/atomic",
+	// except `sync.Once`, which can be safely cloned with a correct done value.
+	SetCustomFunc(reflect.TypeOf(sync.Mutex{}), emptyCloneFunc)
+	SetCustomFunc(reflect.TypeOf(sync.RWMutex{}), emptyCloneFunc)
+	SetCustomFunc(reflect.TypeOf(sync.WaitGroup{}), emptyCloneFunc)
+	SetCustomFunc(reflect.TypeOf(sync.Cond{}), func(allocator *Allocator, old, new reflect.Value) {
+		// Copy the locker L from the old value.
+		oldL := old.FieldByName("L")
+		newL := allocator.Clone(oldL)
+		new.FieldByName("L").Set(newL)
+	})
+	SetCustomFunc(reflect.TypeOf(sync.Pool{}), func(allocator *Allocator, old, new reflect.Value) {
+		// Copy the New func from the old value.
+		oldFn := old.FieldByName("New")
+		newFn := allocator.Clone(oldFn)
+		new.FieldByName("New").Set(newFn)
+	})
+	SetCustomFunc(reflect.TypeOf(sync.Map{}), func(allocator *Allocator, old, new reflect.Value) {
+		if !old.CanAddr() {
+			return
+		}
+
+		// Clone all values inside sync.Map.
+		oldMap := old.Addr().Interface().(*sync.Map)
+		newMap := new.Addr().Interface().(*sync.Map)
+		oldMap.Range(func(key, value interface{}) bool {
+			k := clone(allocator, key)
+			v := clone(allocator, value)
+			newMap.Store(k, v)
+			return true
+		})
+	})
+	SetCustomFunc(reflect.TypeOf(atomic.Value{}), func(allocator *Allocator, old, new reflect.Value) {
+		if !old.CanAddr() {
+			return
+		}
+
+		// Clone value inside atomic.Value.
+		oldValue := old.Addr().Interface().(*atomic.Value)
+		newValue := new.Addr().Interface().(*atomic.Value)
+
+		if v := oldValue.Load(); v != nil {
+			cloned := clone(allocator, v)
+			newValue.Store(cloned)
+		}
+	})
+}
+
+// MarkAsScalar marks t as a scalar type in the heap allocator,
+// so that all clone methods will copy t by value.
+// If t is not a struct or a pointer to struct, MarkAsScalar ignores t.
+//
+// In most cases, it's not necessary to call it explicitly.
+// If a struct type contains scalar type fields only, the struct will be marked as scalar automatically.
+//
+// Here is a list of types marked as scalar by default:
+// - time.Time
+// - reflect.Value
+func MarkAsScalar(t reflect.Type) {
+	defaultAllocator.MarkAsScalar(t)
+}
+
+// MarkAsOpaquePointer marks t as an opaque pointer in the heap allocator,
+// so that all clone methods will copy t by value.
+// If t is not a pointer, MarkAsOpaquePointer ignores t.
+//
+// Here is a list of types marked as opaque pointers by default:
+// - `elliptic.Curve`, which is `*elliptic.CurveParams` or `elliptic.p256Curve`;
+// - `reflect.Type`, which is `*reflect.rtype` defined in `runtime`.
+func MarkAsOpaquePointer(t reflect.Type) {
+	defaultAllocator.MarkAsOpaquePointer(t)
+}
+
+// Func is a custom func to clone a value from old to new.
+// The new value is a zero value
+// for which `new.CanSet()` and `new.CanAddr()` are guaranteed to be true.
+//
+// Func must update new to return its result.
+type Func func(allocator *Allocator, old, new reflect.Value) + +// emptyCloneFunc is used to disable shadow copy. +// It's useful when cloning sync.Mutex as cloned value must be a zero value. +func emptyCloneFunc(allocator *Allocator, old, new reflect.Value) {} + +// SetCustomFunc sets a custom clone function for type t in heap allocator. +// If t is not struct or pointer to struct, SetCustomFunc ignores t. +// +// If fn is nil, remove the custom clone function for type t. +func SetCustomFunc(t reflect.Type, fn Func) { + defaultAllocator.SetCustomFunc(t, fn) +} + +// Init creates a new value of src.Type() and shadow copies all content from src. +// If noCustomFunc is set to true, custom clone function will be ignored. +// +// Init returns true if the value is cloned by a custom func. +// Caller should skip cloning struct fields in depth. +func (st *structType) Init(allocator *Allocator, src, nv reflect.Value, noCustomFunc bool) (done bool) { + dst := nv.Elem() + + if !noCustomFunc && st.fn != nil { + if !src.CanInterface() { + src = forceClearROFlag(src) + } + + st.fn(allocator, src, dst) + done = true + return + } + + ptr := unsafe.Pointer(nv.Pointer()) + shadowCopy(src, ptr) + done = len(st.PointerFields) == 0 + return +} + +func (st *structType) CanShadowCopy() bool { + return len(st.PointerFields) == 0 && st.fn == nil +} + +// IsScalar returns true if k should be considered as a scalar type. +// +// For the sake of performance, string is considered as a scalar type unless arena is enabled. +// If we need to deep copy string value in some cases, we can create a new allocator with custom isScalar function +// in which we can return false when k is reflect.String. +// +// // Create a new allocator which treats string as non-scalar type. +// allocator := NewAllocator(nil, &AllocatorMethods{ +// IsScalar: func(k reflect.Kind) bool { +// return k != reflect.String && IsScalar(k) +// }, +// }) +func IsScalar(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.Complex64, reflect.Complex128, + reflect.Func, + reflect.UnsafePointer, + reflect.Invalid: + return true + + case reflect.String: + // If arena is not enabled, string can be copied as scalar safely + // as it's immutable by design. + return !arenaIsEnabled + } + + return false +} + +func copyScalarValue(src reflect.Value) reflect.Value { + if src.CanInterface() { + return src + } + + dst := newScalarValue(src) + return dst.Convert(src.Type()) +} + +func newScalarValue(src reflect.Value) reflect.Value { + // src is an unexported field value. Copy its value. 
+	switch src.Kind() {
+	case reflect.Bool:
+		return reflect.ValueOf(src.Bool())
+
+	case reflect.Int:
+		return reflect.ValueOf(int(src.Int()))
+	case reflect.Int8:
+		return reflect.ValueOf(int8(src.Int()))
+	case reflect.Int16:
+		return reflect.ValueOf(int16(src.Int()))
+	case reflect.Int32:
+		return reflect.ValueOf(int32(src.Int()))
+	case reflect.Int64:
+		return reflect.ValueOf(src.Int())
+
+	case reflect.Uint:
+		return reflect.ValueOf(uint(src.Uint()))
+	case reflect.Uint8:
+		return reflect.ValueOf(uint8(src.Uint()))
+	case reflect.Uint16:
+		return reflect.ValueOf(uint16(src.Uint()))
+	case reflect.Uint32:
+		return reflect.ValueOf(uint32(src.Uint()))
+	case reflect.Uint64:
+		return reflect.ValueOf(src.Uint())
+	case reflect.Uintptr:
+		return reflect.ValueOf(uintptr(src.Uint()))
+
+	case reflect.Float32:
+		return reflect.ValueOf(float32(src.Float()))
+	case reflect.Float64:
+		return reflect.ValueOf(src.Float())
+
+	case reflect.Complex64:
+		return reflect.ValueOf(complex64(src.Complex()))
+	case reflect.Complex128:
+		return reflect.ValueOf(src.Complex())
+
+	case reflect.String:
+		return reflect.ValueOf(src.String())
+	case reflect.Func:
+		t := src.Type()
+
+		if src.IsNil() {
+			return reflect.Zero(t)
+		}
+
+		// Don't use this trick unless we have no choice.
+		return forceClearROFlag(src)
+	case reflect.UnsafePointer:
+		return reflect.ValueOf(unsafe.Pointer(src.Pointer()))
+	}
+
+	panic(fmt.Errorf("go-clone: impossible type `%v` when cloning private field", src.Type()))
+}
+
+var typeOfInterface = reflect.TypeOf((*interface{})(nil)).Elem()
+
+// forceClearROFlag clears all RO flags in v to make v accessible.
+// It's a hack based on the fact that InterfaceData is always available on RO data.
+// This hack can be broken in any Go version.
+// Don't use it unless we have no choice, e.g. copying a func in some edge cases.
+func forceClearROFlag(v reflect.Value) reflect.Value {
+	var i interface{}
+	indirect := 0
+
+	// Save flagAddr.
+	for v.CanAddr() {
+		v = v.Addr()
+		indirect++
+	}
+
+	v = v.Convert(typeOfInterface)
+	nv := reflect.ValueOf(&i)
+	*(*interfaceData)(unsafe.Pointer(nv.Pointer())) = parseReflectValue(v)
+	cleared := nv.Elem().Elem()
+
+	for indirect > 0 {
+		cleared = cleared.Elem()
+		indirect--
+	}
+
+	return cleared
+}
diff --git a/vendor/github.com/huandu/go-clone/wrapper.go b/vendor/github.com/huandu/go-clone/wrapper.go
new file mode 100644
index 000000000..4a0de7897
--- /dev/null
+++ b/vendor/github.com/huandu/go-clone/wrapper.go
@@ -0,0 +1,166 @@
+// Copyright 2019 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package clone
+
+import (
+	"encoding/binary"
+	"hash/crc64"
+	"reflect"
+	"sync"
+	"unsafe"
+)
+
+var (
+	sizeOfChecksum = unsafe.Sizeof(uint64(0))
+
+	crc64Table = crc64.MakeTable(crc64.ECMA)
+
+	cachedWrapperTypes sync.Map
+)
+
+// Wrap creates a wrapper of v, which must be a pointer.
+// If v is not a pointer, Wrap simply returns v and does nothing.
+//
+// The wrapper is a deep clone of v's value. It holds a shadow copy of v internally.
+//
+//	t := &T{Foo: 123}
+//	v := Wrap(t).(*T)               // v is a clone of t.
+//	reflect.DeepEqual(t, v) == true // v equals t.
+//	v.Foo = 456                     // v.Foo is changed, but t.Foo doesn't change.
+//	orig := Unwrap(v)               // Use `Unwrap` to discard the wrapper and return the original value, which is t.
+//	orig.(*T) == t                  // orig and t are exactly the same.
+//	Undo(v)                         // Use `Undo` to discard any change on v.
+// v.Foo == t.Foo // Now, the value of v and t are the same again. +func Wrap(v interface{}) interface{} { + if v == nil { + return v + } + + val := reflect.ValueOf(v) + pt := val.Type() + + if val.Kind() != reflect.Ptr { + return v + } + + t := pt.Elem() + elem := val.Elem() + ptr := unsafe.Pointer(val.Pointer()) + cache, ok := cachedWrapperTypes.Load(t) + + if !ok { + cache = reflect.StructOf([]reflect.StructField{ + { + Name: "T", + Type: t, + Anonymous: true, + }, + { + Name: "Checksum", + Type: reflect.TypeOf(uint64(0)), + }, + { + Name: "Origin", + Type: pt, + }, + }) + cachedWrapperTypes.Store(t, cache) + } + + wrapperType := cache.(reflect.Type) + pw := defaultAllocator.New(wrapperType) + + wrapperPtr := unsafe.Pointer(pw.Pointer()) + wrapper := pw.Elem() + + // Equivalent code: wrapper.T = Clone(v) + field := wrapper.Field(0) + field.Set(heapCloneState.clone(elem)) + + // Equivalent code: wrapper.Checksum = makeChecksum(v) + checksumPtr := unsafe.Pointer((uintptr(wrapperPtr) + t.Size())) + *(*uint64)(checksumPtr) = makeChecksum(t, uintptr(wrapperPtr), uintptr(ptr)) + + // Equivalent code: wrapper.Origin = v + originPtr := unsafe.Pointer((uintptr(wrapperPtr) + t.Size() + sizeOfChecksum)) + *(*uintptr)(originPtr) = uintptr(ptr) + + return field.Addr().Interface() +} + +func validateChecksum(t reflect.Type, ptr unsafe.Pointer) bool { + pw := uintptr(ptr) + orig := uintptr(getOrigin(t, ptr)) + checksum := *(*uint64)(unsafe.Pointer(uintptr(ptr) + t.Size())) + expected := makeChecksum(t, pw, orig) + + return checksum == expected +} + +func makeChecksum(t reflect.Type, pw uintptr, orig uintptr) uint64 { + var data [binary.MaxVarintLen64 * 2]byte + binary.PutUvarint(data[:binary.MaxVarintLen64], uint64(pw)) + binary.PutUvarint(data[binary.MaxVarintLen64:], uint64(orig)) + return crc64.Checksum(data[:], crc64Table) +} + +func getOrigin(t reflect.Type, ptr unsafe.Pointer) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(uintptr(ptr) + t.Size() + sizeOfChecksum)) +} + +// Unwrap returns v's original value if v is a wrapped value. +// Otherwise, simply returns v itself. +func Unwrap(v interface{}) interface{} { + if v == nil { + return v + } + + val := reflect.ValueOf(v) + + if !isWrapped(val) { + return v + } + + origVal := origin(val) + return origVal.Interface() +} + +func origin(val reflect.Value) reflect.Value { + pt := val.Type() + t := pt.Elem() + ptr := unsafe.Pointer(val.Pointer()) + orig := getOrigin(t, ptr) + origVal := reflect.NewAt(t, orig) + return origVal +} + +// Undo discards any change made in wrapped value. +// If v is not a wrapped value, nothing happens. 
+func Undo(v interface{}) { + if v == nil { + return + } + + val := reflect.ValueOf(v) + + if !isWrapped(val) { + return + } + + origVal := origin(val) + elem := val.Elem() + elem.Set(heapCloneState.clone(origVal.Elem())) +} + +func isWrapped(val reflect.Value) bool { + pt := val.Type() + + if pt.Kind() != reflect.Ptr { + return false + } + + t := pt.Elem() + ptr := unsafe.Pointer(val.Pointer()) + return validateChecksum(t, ptr) +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/.gitignore b/vendor/github.com/huandu/go-sqlbuilder/.gitignore new file mode 100644 index 000000000..950b23bf7 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/.gitignore @@ -0,0 +1,43 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Intellij +*.iml +.idea/ + +# VS Code +debug +debug_test +.vscode/ + +# Mac +.DS_Store + +# go work +go.work +go.work.sum + +# running the fuzzer locally may create this folder, lets ignore it +testdata/fuzz/ \ No newline at end of file diff --git a/vendor/github.com/huandu/go-sqlbuilder/LICENSE b/vendor/github.com/huandu/go-sqlbuilder/LICENSE new file mode 100644 index 000000000..e37e41f13 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2018 Huan Du + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/huandu/go-sqlbuilder/README.md b/vendor/github.com/huandu/go-sqlbuilder/README.md new file mode 100644 index 000000000..6a88b0114 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/README.md @@ -0,0 +1,518 @@ +# SQL builder for Go + +[![Go](https://github.com/huandu/go-sqlbuilder/workflows/Go/badge.svg)](https://github.com/huandu/go-sqlbuilder/actions) +[![GoDoc](https://godoc.org/github.com/huandu/go-sqlbuilder?status.svg)](https://pkg.go.dev/github.com/huandu/go-sqlbuilder) +[![Go Report](https://goreportcard.com/badge/github.com/huandu/go-sqlbuilder)](https://goreportcard.com/report/github.com/huandu/go-sqlbuilder) +[![Coverage Status](https://coveralls.io/repos/github/huandu/go-sqlbuilder/badge.svg?branch=master)](https://coveralls.io/github/huandu/go-sqlbuilder?branch=master) + +- [Install](#install) +- [Usage](#usage) + - [Basic usage](#basic-usage) + - [Pre-defined SQL builders](#pre-defined-sql-builders) + - [Build `WHERE` clause](#build-where-clause) + - [Share `WHERE` clause among builders](#share-where-clause-among-builders) + - [Build SQL for different systems](#build-sql-for-different-systems) + - [Using `Struct` as a light weight ORM](#using-struct-as-a-light-weight-orm) + - [Nested SQL](#nested-sql) + - [Use `sql.Named` in a builder](#use-sqlnamed-in-a-builder) + - [Argument modifiers](#argument-modifiers) + - [Freestyle builder](#freestyle-builder) + - [Clone builders](#clone-builders) + - [Using special syntax to build SQL](#using-special-syntax-to-build-sql) + - [Interpolate `args` in the `sql`](#interpolate-args-in-the-sql) +- [License](#license) + +The `sqlbuilder` package offers a comprehensive suite of SQL string concatenation utilities. It is designed to facilitate the construction of SQL statements compatible with Go's standard library `sql.DB` and `sql.Stmt` interfaces, focusing on optimizing the performance of SQL statement creation and minimizing memory usage. + +The primary objective of this package's design was to craft a SQL construction library that operates independently of specific database drivers and business logic. It is tailored to accommodate the diverse needs of enterprise environments, including the use of custom database drivers, adherence to specialized operational standards, integration into heterogeneous systems, and handling of non-standard SQL in intricate scenarios. Following its open-source release, the package has undergone extensive testing within a large-scale enterprise context, successfully managing the workload of hundreds of millions of orders daily and nearly ten million transactions daily, thus highlighting its robust performance and scalability. + +This package is not restricted to any particular database driver and does not automatically establish connections with any database systems. It does not presuppose the execution of the generated SQL, making it versatile for a broad spectrum of application scenarios that involve the construction of SQL-like statements. It is equally well-suited for further development aimed at creating more business-specific database interaction packages, ORMs, and similar tools. + +## Install + +Install this package by executing the following command: + +```shell +go get github.com/huandu/go-sqlbuilder +``` + +## Usage + +### Basic usage + +We can rapidly construct SQL statements using this package. 
+ +```go +sql := sqlbuilder.Select("id", "name").From("demo.user"). + Where("status = 1").Limit(10). + String() + +fmt.Println(sql) + +// Output: +// SELECT id, name FROM demo.user WHERE status = 1 LIMIT 10 +``` + +In common scenarios, it is necessary to escape all user inputs. To achieve this, initialize a builder at the outset. + +```go +sb := sqlbuilder.NewSelectBuilder() + +sb.Select("id", "name", sb.As("COUNT(*)", "c")) +sb.From("user") +sb.Where(sb.In("status", 1, 2, 5)) + +sql, args := sb.Build() +fmt.Println(sql) +fmt.Println(args) + +// Output: +// SELECT id, name, COUNT(*) AS c FROM user WHERE status IN (?, ?, ?) +// [1 2 5] +``` + +### Pre-defined SQL builders + +This package includes the following pre-defined builders. API documentation and usage examples are available in the `godoc` online documentation. + +- [Struct](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Struct): Factory for creating builders based on struct definitions. +- [CreateTableBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#CreateTableBuilder): Builder for `CREATE TABLE`. +- [SelectBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#SelectBuilder): Builder for `SELECT`. +- [InsertBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#InsertBuilder): Builder for `INSERT`. +- [UpdateBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#UpdateBuilder): Builder for `UPDATE`. +- [DeleteBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#DeleteBuilder): Builder for `DELETE`. +- [UnionBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#UnionBuilder): Builder for `UNION` and `UNION ALL`. +- [CTEBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#CTEBuilder): Builder for Common Table Expression (CTE), e.g. `WITH name (col1, col2) AS (SELECT ...)`. +- [Buildf](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Buildf): Freestyle builder employing `fmt.Sprintf`-like syntax. +- [Build](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Build): Advanced freestyle builder utilizing special syntax as defined in [Args#Compile](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Args.Compile). +- [BuildNamed](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#BuildNamed): Advanced freestyle builder that uses `${key}` to reference values by key in a map. + +A unique method, `SQL(sql string)`, is implemented across all statement builders, enabling the insertion of any arbitrary SQL segment into a builder during SQL construction. This feature is particularly beneficial for crafting SQL statements that incorporate non-standard syntax required by OLTP or OLAP systems. + +```go +// Build a SQL to create a HIVE table. +sql := sqlbuilder.CreateTable("users"). + SQL("PARTITION BY (year)"). + SQL("AS"). + SQL( + sqlbuilder.Select("columns[0] id", "columns[1] name", "columns[2] year"). + From("`all-users.csv`"). + String(), + ). + String() + +fmt.Println(sql) + +// Output: +// CREATE TABLE users PARTITION BY (year) AS SELECT columns[0] id, columns[1] name, columns[2] year FROM `all-users.csv` +``` + +Below are several utility methods designed to address special cases. + +- [Flatten](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Flatten) enables the recursive conversion of an array-like variable into a flat slice of `[]interface{}`. 
For example, invoking `Flatten([]interface{}{"foo", []int{2, 3}})` yields `[]interface{}{"foo", 2, 3}`. This method is compatible with builder methods such as `In`, `NotIn`, `Values`, etc., facilitating the conversion of a typed array into `[]interface{}` or the merging of inputs.
+- [List](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#List) operates similarly to `Flatten`, with the exception that its return value is specifically intended for use as builder arguments. For example, `Buildf("my_func(%v)", List([]int{1, 2, 3})).Build()` generates SQL `my_func(?, ?, ?)` with arguments `[]interface{}{1, 2, 3}`.
+- [Raw](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Raw) designates a string as a "raw string" within arguments. For instance, `Buildf("SELECT %v", Raw("NOW()")).Build()` results in SQL `SELECT NOW()`.
+
+For detailed instructions on utilizing these builders, consult the [examples provided on GoDoc](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#pkg-examples).
+
+### Build `WHERE` clause
+
+The `WHERE` clause is the most important part of a SQL statement. We can use the `Where` method to add one or more conditions to a builder.
+
+To simplify the construction of `WHERE` clauses, a utility type named `Cond` is provided for condition building. All builders that support `WHERE` clauses possess an anonymous `Cond` field, enabling the invocation of `Cond` methods on these builders.
+
+```go
+sb := sqlbuilder.Select("id").From("user")
+sb.Where(
+	sb.In("status", 1, 2, 5),
+	sb.Or(
+		sb.Equal("name", "foo"),
+		sb.Like("email", "foo@%"),
+	),
+)
+
+sql, args := sb.Build()
+fmt.Println(sql)
+fmt.Println(args)
+
+// Output:
+// SELECT id FROM user WHERE status IN (?, ?, ?) AND (name = ? OR email LIKE ?)
+// [1 2 5 foo foo@%]
+```
+
+There are many methods for building conditions.
+
+- [Cond.Equal](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.Equal)/[Cond.E](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.E)/[Cond.EQ](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.EQ): `field = value`.
+- [Cond.NotEqual](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.NotEqual)/[Cond.NE](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.NE)/[Cond.NEQ](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.NEQ): `field <> value`.
+- [Cond.GreaterThan](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.GreaterThan)/[Cond.G](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.G)/[Cond.GT](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.GT): `field > value`.
+- [Cond.GreaterEqualThan](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.GreaterEqualThan)/[Cond.GE](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.GE)/[Cond.GTE](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.GTE): `field >= value`.
+- [Cond.LessThan](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.LessThan)/[Cond.L](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.L)/[Cond.LT](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.LT): `field < value`.
+- [Cond.LessEqualThan](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.LessEqualThan)/[Cond.LE](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.LE)/[Cond.LTE](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.LTE): `field <= value`.
+- [Cond.In](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.In): `field IN (value1, value2, ...)`.
+- [Cond.NotIn](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.NotIn): `field NOT IN (value1, value2, ...)`.
+- [Cond.Like](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.Like): `field LIKE value`.
+- [Cond.ILike](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.ILike): `field ILIKE value`.
+- [Cond.NotLike](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.NotLike): `field NOT LIKE value`.
+- [Cond.NotILike](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.NotILike): `field NOT ILIKE value`.
+- [Cond.Between](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.Between): `field BETWEEN lower AND upper`.
+- [Cond.NotBetween](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.NotBetween): `field NOT BETWEEN lower AND upper`.
+- [Cond.IsNull](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.IsNull): `field IS NULL`.
+- [Cond.IsNotNull](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.IsNotNull): `field IS NOT NULL`.
+- [Cond.Exists](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.Exists): `EXISTS (subquery)`.
+- [Cond.NotExists](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.NotExists): `NOT EXISTS (subquery)`.
+- [Cond.Not](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.Not): `NOT expr`.
+- [Cond.Any](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.Any): `field op ANY (value1, value2, ...)`.
+- [Cond.All](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.All): `field op ALL (value1, value2, ...)`.
+- [Cond.Some](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.Some): `field op SOME (value1, value2, ...)`.
+- [Cond.IsDistinctFrom](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.IsDistinctFrom): `field IS DISTINCT FROM value`.
+- [Cond.IsNotDistinctFrom](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.IsNotDistinctFrom): `field IS NOT DISTINCT FROM value`.
+- [Cond.Var](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.Var): A placeholder for any value.
+
+There are also some methods to combine conditions.
+
+- [Cond.And](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.And): Combine conditions with `AND` operator.
+- [Cond.Or](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Cond.Or): Combine conditions with `OR` operator.
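+
+For instance, these combinators can nest to form grouped conditions. The following is a minimal sketch with illustrative column names; the output assumes the default `MySQL` flavor:
+
+```go
+sb := sqlbuilder.NewSelectBuilder()
+sb.Select("id").From("user")
+sb.Where(
+    sb.Or(
+        sb.And(
+            sb.Equal("type", "vip"),
+            sb.GreaterThan("level", 10),
+        ),
+        sb.Not(sb.IsNull("invited_by")),
+    ),
+)
+
+sql, args := sb.Build()
+fmt.Println(sql)
+fmt.Println(args)
+
+// Output:
+// SELECT id FROM user WHERE ((type = ? AND level > ?) OR NOT invited_by IS NULL)
+// [vip 10]
+```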
+
+### Share `WHERE` clause among builders
+
+Because the `WHERE` clause is so central to SQL, we often need to append conditions continuously and even share common `WHERE` conditions among different builders. Therefore, this package abstracts the `WHERE` clause into a `WhereClause` struct, which can be used to create reusable `WHERE` conditions.
+
+The following example illustrates how to transfer a `WHERE` clause from a `SelectBuilder` to an `UpdateBuilder`.
+
+```go
+// Build a SQL statement to select a user from the database.
+sb := Select("name", "level").From("users")
+sb.Where(
+    sb.Equal("id", 1234),
+)
+fmt.Println(sb)
+
+ub := Update("users")
+ub.Set(
+    ub.Add("level", 10),
+)
+
+// Set the WHERE clause of UPDATE to the WHERE clause of SELECT.
+ub.WhereClause = sb.WhereClause
+fmt.Println(ub)
+
+// Output:
+// SELECT name, level FROM users WHERE id = ?
+// UPDATE users SET level = level + ? WHERE id = ?
+```
+
+Refer to the [WhereClause](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#WhereClause) examples to learn its usage.
+
+### Build SQL for different systems
+
+SQL syntax and parameter placeholders can differ across systems. To address these variations, this package introduces a concept termed "flavor".
+
+Currently, flavors such as `MySQL`, `PostgreSQL`, `SQLite`, `SQLServer`, `CQL`, `ClickHouse`, `Presto`, `Oracle` and `Informix` are supported. Should there be a demand for additional flavors, please submit an issue or a pull request.
+
+By default, all builders utilize `DefaultFlavor` for SQL construction, with `MySQL` as the default setting.
+
+For greater readability, `PostgreSQL.NewSelectBuilder()` can be used to instantiate a `SelectBuilder` with the `PostgreSQL` flavor. All builders can be created in this way.
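+
+For example, the same kind of query compiles to different placeholders under different flavors. Here is a minimal sketch, with illustrative table and column names, using the `PostgreSQL` flavor:
+
+```go
+sb := sqlbuilder.PostgreSQL.NewSelectBuilder()
+sb.Select("id", "name").From("user")
+sb.Where(sb.Equal("status", 1))
+
+sql, args := sb.Build()
+fmt.Println(sql)
+fmt.Println(args)
+
+// Output:
+// SELECT id, name FROM user WHERE status = $1
+// [1]
+```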
+
+### Using `Struct` as a lightweight ORM
+
+`Struct` encapsulates type information and struct fields, serving as a builder factory. Utilizing `Struct` methods, one can generate `SELECT`/`INSERT`/`UPDATE`/`DELETE` builders that are pre-configured for use with the struct, thereby conserving time and mitigating the risk of typographical errors in column names.
+
+One can define a struct type and employ field tags to guide `Struct` in generating the appropriate builders.
+
+```go
+type ATable struct {
+    Field1 string // If a field doesn't have a tag, use "Field1" as the column name in SQL.
+    Field2 int `db:"field2"` // Use "db" in the field tag to set the column name used in SQL.
+    Field3 int64 `db:"field3" fieldtag:"foo,bar"` // Set a fieldtag on a field. We can call `WithTag` to include fields with a tag or `WithoutTag` to exclude them.
+    Field4 int64 `db:"field4" fieldtag:"foo"` // If we use `s.WithTag("foo").Select("t")`, the columns of SELECT are "t.field3" and "t.field4".
+    Field5 string `db:"field5" fieldas:"f5_alias"` // Use "fieldas" in the field tag to set a column alias (AS) used in SELECT.
+    Ignored int32 `db:"-"` // If we set the field name to "-", Struct will ignore it.
+    unexported int // An unexported field is not visible to Struct.
+    Quoted string `db:"quoted" fieldopt:"withquote"` // Quote the column name with back quotes or double quotes. See `Flavor#Quote`.
+    Empty uint `db:"empty" fieldopt:"omitempty"` // Omit the field in UPDATE if it holds a nil or zero value.
+
+    // The `omitempty` option can be written as a function.
+    // In this case, the empty field `Tagged` is omitted in UPDATE for tags `tag1` and `tag3` but not for `tag2`.
+    Tagged string `db:"tagged" fieldopt:"omitempty(tag1,tag3)" fieldtag:"tag1,tag2,tag3"`
+
+    // By default, `SelectFrom("t")` adds the "t." prefix to the names of all fields matching the tag.
+    // Add a dot to the field name to disable this behavior.
+    FieldWithTableAlias string `db:"m.field"`
+}
+```
+
+For detailed instructions on utilizing `Struct`, refer to the [examples](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#Struct).
+
+Furthermore, `Struct` can be employed as a zero-configuration ORM. Unlike most ORM implementations that necessitate preliminary configuration for database connectivity, `Struct` operates without any configuration, functioning seamlessly with any SQL driver compatible with `database/sql`. `Struct` does not invoke any `database/sql` APIs; it solely generates the appropriate SQL statements with arguments for `DB#Query`/`DB#Exec` or an array of struct field addresses for `Rows#Scan`/`Row#Scan`.
+
+The following example demonstrates the use of `Struct` as an ORM. It should be relatively straightforward for developers well-versed in `database/sql` APIs.
+
+```go
+type User struct {
+    ID     int64  `db:"id" fieldtag:"pk"`
+    Name   string `db:"name"`
+    Status int    `db:"status"`
+}
+
+// A global variable for creating SQL builders.
+// All methods of userStruct are thread-safe.
+var userStruct = NewStruct(new(User))
+
+func ExampleStruct() {
+    // Prepare SELECT query.
+    // SELECT user.id, user.name, user.status FROM user WHERE id = 1234
+    sb := userStruct.SelectFrom("user")
+    sb.Where(sb.Equal("id", 1234))
+
+    // Execute the query and scan the results into the user struct.
+    sql, args := sb.Build()
+    rows, _ := db.Query(sql, args...)
+    defer rows.Close()
+
+    // Scan row data and set value to user.
+    // Assuming the following data is retrieved:
+    //
+    // | id   | name   | status |
+    // |------|--------|--------|
+    // | 1234 | huandu | 1      |
+    var user User
+    rows.Scan(userStruct.Addr(&user)...)
+
+    fmt.Println(sql)
+    fmt.Println(args)
+    fmt.Printf("%#v", user)
+
+    // Output:
+    // SELECT user.id, user.name, user.status FROM user WHERE id = ?
+    // [1234]
+    // sqlbuilder.User{ID:1234, Name:"huandu", Status:1}
+}
+```
+
+In numerous production environments, table column names adhere to the snake_case convention, e.g. `user_id`. Conversely, struct fields in Go are typically in CamelCase to maintain public accessibility and satisfy `golint`. Employing the `db` tag for each struct field can be redundant. To streamline this, a field mapper function can be utilized to establish a consistent rule for mapping struct field names to database column names.
+
+The `DefaultFieldMapper` serves as a global field mapper function, tasked with the conversion of field names to a desired style. By default, it is set to `nil`, effectively performing no action. Recognizing that the majority of table column names follow the snake_case convention, one can set `DefaultFieldMapper` to `sqlbuilder.SnakeCaseMapper`. For instances that deviate from this norm, a custom mapper can be assigned to a `Struct` via the `WithFieldMapper` method.
+
+Here are important considerations regarding the field mapper:
+
+- The field tag takes precedence over the field mapper function; the mapper is ignored when the `db` tag is set.
+- The field mapper is called only once per `Struct`, the first time that `Struct` is used to create a builder.
+
+Refer to the [field mapper function sample](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#FieldMapperFunc) for an illustrative example.
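+
+As a minimal sketch, assuming a hypothetical `Order` type and an `orders` table, enabling the snake_case mapper lets a tag-free struct map onto snake_case columns:
+
+```go
+package yourpkg
+
+import (
+    "fmt"
+
+    "github.com/huandu/go-sqlbuilder"
+)
+
+// Order deliberately carries no `db` tags; the mapper derives the column names.
+type Order struct {
+    OrderID  int64  // mapped to column "order_id"
+    UserName string // mapped to column "user_name"
+}
+
+var orderStruct = sqlbuilder.NewStruct(new(Order))
+
+func init() {
+    // The mapper is applied lazily, on the first builder created from a Struct,
+    // so setting it here is early enough.
+    sqlbuilder.DefaultFieldMapper = sqlbuilder.SnakeCaseMapper
+}
+
+func PrintOrderQuery() {
+    fmt.Println(orderStruct.SelectFrom("orders"))
+
+    // Output:
+    // SELECT orders.order_id, orders.user_name FROM orders
+}
+```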
+
+### Nested SQL
+
+Creating nested SQL is straightforward: simply use a builder as an argument for nesting.
+
+Here is an illustrative example.
+
+```go
+sb := sqlbuilder.NewSelectBuilder()
+fromSb := sqlbuilder.NewSelectBuilder()
+statusSb := sqlbuilder.NewSelectBuilder()
+
+sb.Select("id")
+sb.From(sb.BuilderAs(fromSb, "user"))
+sb.Where(sb.In("status", statusSb))
+
+fromSb.Select("id").From("user").Where(fromSb.GreaterThan("level", 4))
+statusSb.Select("status").From("config").Where(statusSb.Equal("state", 1))
+
+sql, args := sb.Build()
+fmt.Println(sql)
+fmt.Println(args)
+
+// Output:
+// SELECT id FROM (SELECT id FROM user WHERE level > ?) AS user WHERE status IN (SELECT status FROM config WHERE state = ?)
+// [4 1]
+```
+
+### Use `sql.Named` in a builder
+
+The `sql.Named` function, as defined in the `database/sql` package, facilitates the creation of named arguments within SQL statements. This feature is essential for scenarios where an argument needs to be reused multiple times within a single SQL statement. Incorporating named arguments into a builder is straightforward: treat them as regular arguments.
+
+Here is a sample.
+
+```go
+now := time.Now().Unix()
+start := sql.Named("start", now-86400)
+end := sql.Named("end", now+86400)
+sb := sqlbuilder.NewSelectBuilder()
+
+sb.Select("name")
+sb.From("user")
+sb.Where(
+    sb.Between("created_at", start, end),
+    sb.GE("modified_at", start),
+)
+
+sql, args := sb.Build()
+fmt.Println(sql)
+fmt.Println(args)
+
+// Output:
+// SELECT name FROM user WHERE created_at BETWEEN @start AND @end AND modified_at >= @start
+// [{{} start 1514458225} {{} end 1514544625}]
+```
+
+### Argument modifiers
+
+Several argument modifiers are available:
+
+- `List(arg)` encapsulates a series of arguments. Given `arg` as a slice or array, for instance a slice containing three integers, it compiles to `?, ?, ?` and appears in the final arguments as three individual integers. This serves as a convenience tool, usable within `IN` expressions or within the `VALUES` clause of an `INSERT INTO` statement.
+- `TupleNames(names)` and `Tuple(values)` facilitate the representation of tuple syntax in SQL. For usage examples, refer to [Tuple](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#example-Tuple).
+- `Named(name, arg)` designates a named argument. It works only with `Build` or `BuildNamed`, where it defines a named placeholder using the syntax `${name}`.
+- `Raw(expr)` designates `expr` as a plain string within SQL, as opposed to an argument. During the construction of a builder, raw expressions are embedded directly into the SQL string, without `?` placeholders.
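+
+For example, `List` can feed an `IN` expression directly. A minimal sketch with illustrative table and column names:
+
+```go
+statuses := []int{1, 2, 3}
+
+sb := sqlbuilder.NewSelectBuilder()
+sb.Select("id").From("user")
+sb.Where(sb.In("status", sqlbuilder.List(statuses)))
+
+sql, args := sb.Build()
+fmt.Println(sql)
+fmt.Println(args)
+
+// Output:
+// SELECT id FROM user WHERE status IN (?, ?, ?)
+// [1 2 3]
+```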
+
+### Freestyle builder
+
+A builder essentially serves as a means to record arguments. For constructing lengthy SQL statements that incorporate numerous special syntax elements (e.g. special comments intended for a database proxy), `Buildf` can be employed to format the SQL string using a syntax akin to `fmt.Sprintf`.
+
+```go
+sb := sqlbuilder.NewSelectBuilder()
+sb.Select("id").From("user")
+
+explain := sqlbuilder.Buildf("EXPLAIN %v LEFT JOIN SELECT * FROM banned WHERE state IN (%v, %v)", sb, 1, 2)
+sql, args := explain.Build()
+fmt.Println(sql)
+fmt.Println(args)
+
+// Output:
+// EXPLAIN SELECT id FROM user LEFT JOIN SELECT * FROM banned WHERE state IN (?, ?)
+// [1 2]
+```
+
+### Clone builders
+
+The `Clone` methods make any supported builder reusable as a template. You can create a partially initialized builder once (even as a global), then call `Clone()` to get an independent copy to customize per request. This avoids repeated setup; as long as the shared template itself is never mutated after creation, it can be cloned safely from concurrent code.
+
+Supported builders with `Clone`:
+
+- [CreateTableBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#CreateTableBuilder)
+- [CTEBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#CTEBuilder)
+- [CTEQueryBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#CTEQueryBuilder)
+- [DeleteBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#DeleteBuilder)
+- [InsertBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#InsertBuilder)
+- [SelectBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#SelectBuilder)
+- [UnionBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#UnionBuilder)
+- [UpdateBuilder](https://pkg.go.dev/github.com/huandu/go-sqlbuilder#UpdateBuilder)
+
+Example: define a global SELECT template and clone it per call:
+
+```go
+package yourpkg
+
+import "github.com/huandu/go-sqlbuilder"
+
+// Global template. It is safe to reuse it by cloning.
+var baseUserSelect = sqlbuilder.NewSelectBuilder().
+    Select("id", "name", "email").
+    From("users").
+    Where("deleted_at IS NULL")
+
+func ListActiveUsers(limit, offset int) (string, []interface{}) {
+    sb := baseUserSelect.Clone() // independent copy
+    sb.OrderBy("id").Asc()
+    sb.Limit(limit).Offset(offset)
+    return sb.Build()
+}
+
+func GetActiveUserByID(id int64) (string, []interface{}) {
+    sb := baseUserSelect.Clone() // start from the same template
+    sb.Where(sb.Equal("id", id))
+    sb.Limit(1)
+    return sb.Build()
+}
+```
+
+The same template pattern applies to other builders. For example, keep a base `UpdateBuilder` with the table and common `SET` clauses, or a base `CTEBuilder` defining reusable CTEs, then `Clone()` and add query-specific `WHERE`/`ORDER BY`/`LIMIT`/`RETURNING` clauses as needed.
+
+### Using special syntax to build SQL
+
+The `sqlbuilder` package incorporates special syntax for representing uncompiled SQL internally. To leverage this syntax for developing customized tools, the `Build` function can be utilized to compile it with the necessary arguments.
+
+The format string employs special syntax for representing arguments:
+
+- `$?` references successive arguments supplied in the function call, functioning similarly to `%v` in `fmt.Sprintf`.
+- `$0`, `$1`, ..., `$n` reference the nth argument provided in the call; subsequent `$?` will then refer to arguments from n+1 onwards.
+- `${name}` references a named argument defined by `Named` using the specified `name`.
+- `$$` represents a literal `"$"` character.
+
+```go
+sb := sqlbuilder.NewSelectBuilder()
+sb.Select("id").From("user").Where(sb.In("status", 1, 2))
+
+b := sqlbuilder.Build("EXPLAIN $? LEFT JOIN SELECT * FROM $? WHERE created_at > $? AND state IN (${states}) AND modified_at BETWEEN $2 AND $?",
+    sb, sqlbuilder.Raw("banned"), 1514458225, 1514544625, sqlbuilder.Named("states", sqlbuilder.List([]int{3, 4, 5})))
+sql, args := b.Build()
+
+fmt.Println(sql)
+fmt.Println(args)
+
+// Output:
+// EXPLAIN SELECT id FROM user WHERE status IN (?, ?) LEFT JOIN SELECT * FROM banned WHERE created_at > ? AND state IN (?, ?, ?) AND modified_at BETWEEN ? AND ?
+// [1 2 1514458225 3 4 5 1514458225 1514544625]
+```
+
+For scenarios where only the `${name}` syntax is required to reference named arguments, utilize `BuildNamed`. This function disables all special syntax except for `${name}` and `$$`.
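+
+For instance, here is a minimal sketch of `BuildNamed` with an illustrative query and keys; each `${key}` placeholder is compiled to the flavor's placeholder and its value is appended to the args:
+
+```go
+b := sqlbuilder.BuildNamed("SELECT * FROM user WHERE status = ${status} AND created_at > ${since}",
+    map[string]interface{}{
+        "status": 1,
+        "since":  1234567890,
+    })
+sql, args := b.Build()
+
+fmt.Println(sql)
+fmt.Println(args)
+
+// Output:
+// SELECT * FROM user WHERE status = ? AND created_at > ?
+// [1 1234567890]
+```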
+ +### Interpolate `args` in the `sql` + +Certain SQL-like drivers, such as those for Redis or Elasticsearch, do not implement the `StmtExecContext#ExecContext` method. These drivers encounter issues when `len(args) > 0`. The sole workaround is to interpolate `args` directly into the `sql` string and then execute the resulting query with the driver. + +The interpolation feature in this package is designed to provide a "basically sufficient" level of functionality, rather than a capability that rivals the comprehensive features of various SQL drivers and DBMS systems. + +_Security warning_: While efforts are made to escape special characters in interpolation methods, this approach remains less secure than using `Stmt` as implemented by SQL drivers. + +This feature draws inspiration from the interpolation capabilities found in the `github.com/go-sql-driver/mysql` package. + +Here is an example specifically for MySQL: + +```go +sb := MySQL.NewSelectBuilder() +sb.Select("name").From("user").Where( + sb.NE("id", 1234), + sb.E("name", "Charmy Liu"), + sb.Like("desc", "%mother's day%"), +) +sql, args := sb.Build() +query, err := MySQL.Interpolate(sql, args) + +fmt.Println(query) +fmt.Println(err) + +// Output: +// SELECT name FROM user WHERE id <> 1234 AND name = 'Charmy Liu' AND desc LIKE '%mother\'s day%' +// +``` + +Here is an example for PostgreSQL, noting that dollar quoting is supported: + +```go +// Only the last `$1` is interpolated. +// Others are not interpolated as they are inside dollar quote (the `$$`). +query, err := PostgreSQL.Interpolate(` +CREATE FUNCTION dup(in int, out f1 int, out f2 text) AS $$ + SELECT $1, CAST($1 AS text) || ' is text' +$$ +LANGUAGE SQL; + +SELECT * FROM dup($1);`, []interface{}{42}) + +fmt.Println(query) +fmt.Println(err) + +// Output: +// +// CREATE FUNCTION dup(in int, out f1 int, out f2 text) AS $$ +// SELECT $1, CAST($1 AS text) || ' is text' +// $$ +// LANGUAGE SQL; +// +// SELECT * FROM dup(42); +// +``` + +## License + +This package is licensed under the MIT license. For more information, refer to the LICENSE file. diff --git a/vendor/github.com/huandu/go-sqlbuilder/args.go b/vendor/github.com/huandu/go-sqlbuilder/args.go new file mode 100644 index 000000000..3467a80a3 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/args.go @@ -0,0 +1,437 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "database/sql" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/huandu/go-clone" +) + +// Args stores arguments associated with a SQL. +type Args struct { + // The default flavor used by `Args#Compile` + Flavor Flavor + + indexBase int + argValues *valueStore + namedArgs map[string]int + sqlNamedArgs map[string]int + onlyNamed bool +} + +func init() { + // Predefine some $n args to avoid additional memory allocation. + predefinedArgs = make([]string, 0, maxPredefinedArgs) + + for i := 0; i < maxPredefinedArgs; i++ { + predefinedArgs = append(predefinedArgs, fmt.Sprintf("$%v", i)) + } +} + +const maxPredefinedArgs = 64 + +var predefinedArgs []string + +// Add adds an arg to Args and returns a placeholder. 
+func (args *Args) Add(arg interface{}) string { + idx := args.add(arg) + + if idx < maxPredefinedArgs { + return predefinedArgs[idx] + } + + return fmt.Sprintf("$%v", idx) +} + +func (args *Args) add(arg interface{}) int { + idx := args.argValues.Len() + args.indexBase + + switch a := arg.(type) { + case sql.NamedArg: + if args.sqlNamedArgs == nil { + args.sqlNamedArgs = map[string]int{} + } + + if p, ok := args.sqlNamedArgs[a.Name]; ok { + arg = args.argValues.Load(p) + break + } + + args.sqlNamedArgs[a.Name] = idx + case namedArgs: + if args.namedArgs == nil { + args.namedArgs = map[string]int{} + } + + if p, ok := args.namedArgs[a.name]; ok { + arg = args.argValues.Load(p) + break + } + + // Find out the real arg and add it to args. + idx = args.add(a.arg) + args.namedArgs[a.name] = idx + return idx + } + + if args.argValues == nil { + args.argValues = &valueStore{} + } + + args.argValues.Add(arg) + return idx +} + +// Replace replaces the placeholder with arg. +// +// The placeholder must be the value returned by `Add`, e.g. "$1". +// If the placeholder is not found, this method does nothing. +func (args *Args) Replace(placeholder string, arg interface{}) { + dollar := strings.IndexRune(placeholder, '$') + + if dollar != 0 { + return + } + + if i, err := strconv.Atoi(placeholder[1:]); err == nil { + i -= args.indexBase + args.argValues.Set(i, arg) + } +} + +// Compile compiles builder's format to standard sql and returns associated args. +// +// The format string uses a special syntax to represent arguments. +// +// $? refers successive arguments passed in the call. It works similar as `%v` in `fmt.Sprintf`. +// $0 $1 ... $n refers nth-argument passed in the call. Next $? will use arguments n+1. +// ${name} refers a named argument created by `Named` with `name`. +// $$ is a "$" string. +func (args *Args) Compile(format string, initialValue ...interface{}) (query string, values []interface{}) { + return args.CompileWithFlavor(format, args.Flavor, initialValue...) +} + +// CompileWithFlavor compiles builder's format to standard sql with flavor and returns associated args. +// +// See doc for `Compile` to learn details. +func (args *Args) CompileWithFlavor(format string, flavor Flavor, initialValue ...interface{}) (query string, values []interface{}) { + idx := strings.IndexRune(format, '$') + offset := 0 + ctx := &argsCompileContext{ + stringBuilder: newStringBuilder(), + Flavor: flavor, + Values: initialValue, + } + + if ctx.Flavor == invalidFlavor { + ctx.Flavor = DefaultFlavor + } + + for idx >= 0 && len(format) > 0 { + if idx > 0 { + ctx.WriteString(format[:idx]) + } + + format = format[idx+1:] + + // Treat the $ at the end of format is a normal $ rune. + if len(format) == 0 { + ctx.WriteRune('$') + break + } + + if r := format[0]; r == '$' { + ctx.WriteRune('$') + format = format[1:] + } else if r == '{' { + format = args.compileNamed(ctx, format) + } else if !args.onlyNamed && '0' <= r && r <= '9' { + format, offset = args.compileDigits(ctx, format, offset) + } else if !args.onlyNamed && r == '?' { + format, offset = args.compileSuccessive(ctx, format[1:], offset) + } else { + // For unknown $ expression format, treat it as a normal $ rune. + ctx.WriteRune('$') + } + + idx = strings.IndexRune(format, '$') + } + + if len(format) > 0 { + ctx.WriteString(format) + } + + query = ctx.String() + values = args.mergeSQLNamedArgs(ctx) + return +} + +// Value returns the value of the arg. +// The arg must be the value returned by `Add`. 
+func (args *Args) Value(arg string) interface{} { + _, values := args.Compile(arg) + + if len(values) == 0 { + return nil + } + + return values[0] +} + +func (args *Args) compileNamed(ctx *argsCompileContext, format string) string { + i := 1 + + for ; i < len(format) && format[i] != '}'; i++ { + // Nothing. + } + + // Invalid $ format. Ignore it. + if i == len(format) { + return format + } + + name := format[1:i] + format = format[i+1:] + + if p, ok := args.namedArgs[name]; ok { + format, _ = args.compileSuccessive(ctx, format, p-args.indexBase) + } + + return format +} + +func (args *Args) compileDigits(ctx *argsCompileContext, format string, offset int) (string, int) { + i := 1 + + for ; i < len(format) && '0' <= format[i] && format[i] <= '9'; i++ { + // Nothing. + } + + digits := format[:i] + format = format[i:] + + if pointer, err := strconv.Atoi(digits); err == nil { + return args.compileSuccessive(ctx, format, pointer-args.indexBase) + } + + return format, offset +} + +func (args *Args) compileSuccessive(ctx *argsCompileContext, format string, offset int) (string, int) { + if offset < 0 || offset >= args.argValues.Len() { + ctx.WriteString("/* INVALID ARG $") + ctx.WriteString(strconv.Itoa(offset)) + ctx.WriteString(" */") + return format, offset + } + + arg := args.argValues.Load(offset) + ctx.WriteValue(arg) + + return format, offset + 1 +} + +func (args *Args) mergeSQLNamedArgs(ctx *argsCompileContext) []interface{} { + if len(args.sqlNamedArgs) == 0 && len(ctx.NamedArgs) == 0 { + return ctx.Values + } + + values := ctx.Values + existingNames := make(map[string]struct{}, len(ctx.NamedArgs)) + + // Add all named args to values. + // Remove duplicated named args in this step. + for _, arg := range ctx.NamedArgs { + if _, ok := existingNames[arg.Name]; !ok { + existingNames[arg.Name] = struct{}{} + values = append(values, arg) + } + } + + // Stabilize the sequence to make it easier to write test cases. + ints := make([]int, 0, len(args.sqlNamedArgs)) + + for n, p := range args.sqlNamedArgs { + if _, ok := existingNames[n]; ok { + continue + } + + ints = append(ints, p) + } + + sort.Ints(ints) + + for _, i := range ints { + values = append(values, args.argValues.Load(i)) + } + + return values +} + +func parseNamedArgs(initialValue []interface{}) (values []interface{}, namedValues []sql.NamedArg) { + if len(initialValue) == 0 { + values = initialValue + return + } + + // sql.NamedArgs must be placed at the end of the initial value. + size := len(initialValue) + i := size + + for ; i > 0; i-- { + switch initialValue[i-1].(type) { + case sql.NamedArg: + continue + } + + break + } + + if i == size { + values = initialValue + return + } + + values = initialValue[:i] + namedValues = make([]sql.NamedArg, 0, size-i) + + for ; i < size; i++ { + namedValues = append(namedValues, initialValue[i].(sql.NamedArg)) + } + + return +} + +type argsCompileContext struct { + *stringBuilder + + Flavor Flavor + Values []interface{} + NamedArgs []sql.NamedArg +} + +func (ctx *argsCompileContext) WriteValue(arg interface{}) { + switch a := arg.(type) { + case Builder: + s, values := a.BuildWithFlavor(ctx.Flavor, ctx.Values...) + ctx.WriteString(s) + + // Add all values to ctx. + // Named args must be located at the end of values. + values, namedArgs := parseNamedArgs(values) + ctx.Values = values + ctx.NamedArgs = append(ctx.NamedArgs, namedArgs...) 
+ + case sql.NamedArg: + ctx.WriteRune('@') + ctx.WriteString(a.Name) + ctx.NamedArgs = append(ctx.NamedArgs, a) + + case rawArgs: + ctx.WriteString(a.expr) + + case listArgs: + if a.isTuple { + ctx.WriteRune('(') + } + + if len(a.args) > 0 { + ctx.WriteValue(a.args[0]) + } + + for i := 1; i < len(a.args); i++ { + ctx.WriteString(", ") + ctx.WriteValue(a.args[i]) + } + + if a.isTuple { + ctx.WriteRune(')') + } + + case condBuilder: + a.Builder(ctx) + + default: + switch ctx.Flavor { + case MySQL, SQLite, CQL, ClickHouse, Presto, Informix, Doris: + ctx.WriteRune('?') + case PostgreSQL: + fmt.Fprintf(ctx, "$%d", len(ctx.Values)+1) + case SQLServer: + fmt.Fprintf(ctx, "@p%d", len(ctx.Values)+1) + case Oracle: + fmt.Fprintf(ctx, ":%d", len(ctx.Values)+1) + default: + panic(fmt.Errorf("Args.CompileWithFlavor: invalid flavor %v (%v)", ctx.Flavor, int(ctx.Flavor))) + } + + ctx.Values = append(ctx.Values, arg) + } +} + +func (ctx *argsCompileContext) WriteValues(values []interface{}, sep string) { + if len(values) == 0 { + return + } + + ctx.WriteValue(values[0]) + + for _, v := range values[1:] { + ctx.WriteString(sep) + ctx.WriteValue(v) + } +} + +type valueStore struct { + Values []interface{} +} + +func init() { + // The values in valueStore should be shadow-copied to avoid unnecessary cost. + t := reflect.TypeOf(valueStore{}) + clone.SetCustomFunc(t, func(allocator *clone.Allocator, old, new reflect.Value) { + values := old.FieldByName("Values") + newValues := allocator.Clone(values) + new.FieldByName("Values").Set(newValues) + }) +} + +func (as *valueStore) Len() int { + if as == nil { + return 0 + } + + return len(as.Values) +} + +// Add adds an arg to argsValues and returns its index. +func (as *valueStore) Add(arg interface{}) int { + as.Values = append(as.Values, arg) + return len(as.Values) - 1 +} + +// Set sets the arg value by index. +func (as *valueStore) Set(index int, arg interface{}) { + if as == nil || index < 0 || index >= len(as.Values) { + return + } + + as.Values[index] = arg +} + +// Load returns the arg value by index. +// Returns nil if index is out of range or as itself is nil. +func (as *valueStore) Load(index int) interface{} { + if as == nil || index < 0 || index >= len(as.Values) { + return nil + } + + return as.Values[index] +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/builder.go b/vendor/github.com/huandu/go-sqlbuilder/builder.go new file mode 100644 index 000000000..692cb140f --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/builder.go @@ -0,0 +1,119 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "fmt" +) + +// Builder is a general SQL builder. +// It's used by Args to create nested SQL like the `IN` expression in +// `SELECT * FROM t1 WHERE id IN (SELECT id FROM t2)`. +type Builder interface { + Build() (sql string, args []interface{}) + BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) + Flavor() Flavor +} + +type compiledBuilder struct { + args *Args + format string +} + +var _ Builder = new(compiledBuilder) + +func (cb *compiledBuilder) Build() (sql string, args []interface{}) { + return cb.args.Compile(cb.format) +} + +func (cb *compiledBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + return cb.args.CompileWithFlavor(cb.format, flavor, initialArg...) 
+} + +// Flavor returns flavor of builder +// Always returns DefaultFlavor +func (cb *compiledBuilder) Flavor() Flavor { + return cb.args.Flavor +} + +type flavoredBuilder struct { + builder Builder + flavor Flavor +} + +func (fb *flavoredBuilder) Build() (sql string, args []interface{}) { + return fb.builder.BuildWithFlavor(fb.flavor) +} + +func (fb *flavoredBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + return fb.builder.BuildWithFlavor(flavor, initialArg...) +} + +// Flavor returns flavor of builder +func (fb *flavoredBuilder) Flavor() Flavor { + return fb.flavor +} + +// WithFlavor creates a new Builder based on builder with a default flavor. +func WithFlavor(builder Builder, flavor Flavor) Builder { + return &flavoredBuilder{ + builder: builder, + flavor: flavor, + } +} + +// Buildf creates a Builder from a format string using `fmt.Sprintf`-like syntax. +// As all arguments will be converted to a string internally, e.g. "$0", +// only `%v` and `%s` are valid. +func Buildf(format string, arg ...interface{}) Builder { + args := &Args{ + Flavor: DefaultFlavor, + } + vars := make([]interface{}, 0, len(arg)) + + for _, a := range arg { + vars = append(vars, args.Add(a)) + } + + return &compiledBuilder{ + args: args, + format: fmt.Sprintf(Escape(format), vars...), + } +} + +// Build creates a Builder from a format string. +// The format string uses special syntax to represent arguments. +// See doc in `Args#Compile` for syntax details. +func Build(format string, arg ...interface{}) Builder { + args := &Args{ + Flavor: DefaultFlavor, + } + + for _, a := range arg { + args.Add(a) + } + + return &compiledBuilder{ + args: args, + format: format, + } +} + +// BuildNamed creates a Builder from a format string. +// The format string uses `${key}` to refer the value of named by key. +func BuildNamed(format string, named map[string]interface{}) Builder { + args := &Args{ + Flavor: DefaultFlavor, + onlyNamed: true, + } + + for n, v := range named { + args.Add(Named(n, v)) + } + + return &compiledBuilder{ + args: args, + format: format, + } +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/cond.go b/vendor/github.com/huandu/go-sqlbuilder/cond.go new file mode 100644 index 000000000..e74cbb744 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/cond.go @@ -0,0 +1,650 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +const ( + lparen = "(" + rparen = ")" + opOR = " OR " + opAND = " AND " + opNOT = "NOT " +) + +const minIndexBase = 256 + +// Cond provides several helper methods to build conditions. +type Cond struct { + Args *Args +} + +// NewCond returns a new Cond. +func NewCond() *Cond { + return &Cond{ + Args: &Args{ + // Based on the discussion in #174, users may call this method to create + // `Cond` for building various conditions, which is a misuse, but we + // cannot completely prevent this error. To facilitate users in + // identifying the issue when they make mistakes and to avoid + // unexpected stackoverflows, the base index for `Args` is + // deliberately set to a larger non-zero value here. This can + // significantly reduce the likelihood of issues and allows for + // timely error notification to users. + indexBase: minIndexBase, + }, + } +} + +// Equal is used to construct the expression "field = value". 
+func (c *Cond) Equal(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" = ") + ctx.WriteValue(value) + }, + }) +} + +// E is an alias of Equal. +func (c *Cond) E(field string, value interface{}) string { + return c.Equal(field, value) +} + +// EQ is an alias of Equal. +func (c *Cond) EQ(field string, value interface{}) string { + return c.Equal(field, value) +} + +// NotEqual is used to construct the expression "field <> value". +func (c *Cond) NotEqual(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" <> ") + ctx.WriteValue(value) + }, + }) +} + +// NE is an alias of NotEqual. +func (c *Cond) NE(field string, value interface{}) string { + return c.NotEqual(field, value) +} + +// NEQ is an alias of NotEqual. +func (c *Cond) NEQ(field string, value interface{}) string { + return c.NotEqual(field, value) +} + +// GreaterThan is used to construct the expression "field > value". +func (c *Cond) GreaterThan(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" > ") + ctx.WriteValue(value) + }, + }) +} + +// G is an alias of GreaterThan. +func (c *Cond) G(field string, value interface{}) string { + return c.GreaterThan(field, value) +} + +// GT is an alias of GreaterThan. +func (c *Cond) GT(field string, value interface{}) string { + return c.GreaterThan(field, value) +} + +// GreaterEqualThan is used to construct the expression "field >= value". +func (c *Cond) GreaterEqualThan(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" >= ") + ctx.WriteValue(value) + }, + }) +} + +// GE is an alias of GreaterEqualThan. +func (c *Cond) GE(field string, value interface{}) string { + return c.GreaterEqualThan(field, value) +} + +// GTE is an alias of GreaterEqualThan. +func (c *Cond) GTE(field string, value interface{}) string { + return c.GreaterEqualThan(field, value) +} + +// LessThan is used to construct the expression "field < value". +func (c *Cond) LessThan(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" < ") + ctx.WriteValue(value) + }, + }) +} + +// L is an alias of LessThan. +func (c *Cond) L(field string, value interface{}) string { + return c.LessThan(field, value) +} + +// LT is an alias of LessThan. +func (c *Cond) LT(field string, value interface{}) string { + return c.LessThan(field, value) +} + +// LessEqualThan is used to construct the expression "field <= value". +func (c *Cond) LessEqualThan(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" <= ") + ctx.WriteValue(value) + }, + }) +} + +// LE is an alias of LessEqualThan. +func (c *Cond) LE(field string, value interface{}) string { + return c.LessEqualThan(field, value) +} + +// LTE is an alias of LessEqualThan. 
+func (c *Cond) LTE(field string, value interface{}) string { + return c.LessEqualThan(field, value) +} + +// In is used to construct the expression "field IN (value...)". +func (c *Cond) In(field string, values ...interface{}) string { + if len(field) == 0 { + return "" + } + + // Empty values means "false". + if len(values) == 0 { + return "0 = 1" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" IN (") + ctx.WriteValues(values, ", ") + ctx.WriteString(")") + }, + }) +} + +// NotIn is used to construct the expression "field NOT IN (value...)". +func (c *Cond) NotIn(field string, values ...interface{}) string { + if len(field) == 0 { + return "" + } + + // Empty values means "true". + if len(values) == 0 { + return "0 = 0" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" NOT IN (") + ctx.WriteValues(values, ", ") + ctx.WriteString(")") + }, + }) +} + +// Like is used to construct the expression "field LIKE value". +func (c *Cond) Like(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" LIKE ") + ctx.WriteValue(value) + }, + }) +} + +// ILike is used to construct the expression "field ILIKE value". +// +// When the database system does not support the ILIKE operator, +// the ILike method will return "LOWER(field) LIKE LOWER(value)" +// to simulate the behavior of the ILIKE operator. +func (c *Cond) ILike(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + switch ctx.Flavor { + case PostgreSQL, SQLite: + ctx.WriteString(field) + ctx.WriteString(" ILIKE ") + ctx.WriteValue(value) + + default: + // Use LOWER to simulate ILIKE. + ctx.WriteString("LOWER(") + ctx.WriteString(field) + ctx.WriteString(") LIKE LOWER(") + ctx.WriteValue(value) + ctx.WriteString(")") + } + }, + }) +} + +// NotLike is used to construct the expression "field NOT LIKE value". +func (c *Cond) NotLike(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" NOT LIKE ") + ctx.WriteValue(value) + }, + }) +} + +// NotILike is used to construct the expression "field NOT ILIKE value". +// +// When the database system does not support the ILIKE operator, +// the NotILike method will return "LOWER(field) NOT LIKE LOWER(value)" +// to simulate the behavior of the ILIKE operator. +func (c *Cond) NotILike(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + switch ctx.Flavor { + case PostgreSQL, SQLite: + ctx.WriteString(field) + ctx.WriteString(" NOT ILIKE ") + ctx.WriteValue(value) + + default: + // Use LOWER to simulate ILIKE. + ctx.WriteString("LOWER(") + ctx.WriteString(field) + ctx.WriteString(") NOT LIKE LOWER(") + ctx.WriteValue(value) + ctx.WriteString(")") + } + }, + }) +} + +// IsNull is used to construct the expression "field IS NULL". 
+func (c *Cond) IsNull(field string) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" IS NULL") + }, + }) +} + +// IsNotNull is used to construct the expression "field IS NOT NULL". +func (c *Cond) IsNotNull(field string) string { + if len(field) == 0 { + return "" + } + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" IS NOT NULL") + }, + }) +} + +// Between is used to construct the expression "field BETWEEN lower AND upper". +func (c *Cond) Between(field string, lower, upper interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" BETWEEN ") + ctx.WriteValue(lower) + ctx.WriteString(" AND ") + ctx.WriteValue(upper) + }, + }) +} + +// NotBetween is used to construct the expression "field NOT BETWEEN lower AND upper". +func (c *Cond) NotBetween(field string, lower, upper interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" NOT BETWEEN ") + ctx.WriteValue(lower) + ctx.WriteString(" AND ") + ctx.WriteValue(upper) + }, + }) +} + +// Or is used to construct the expression OR logic like "expr1 OR expr2 OR expr3". +func (c *Cond) Or(orExpr ...string) string { + orExpr = filterEmptyStrings(orExpr) + + if len(orExpr) == 0 { + return "" + } + + exprByteLen := estimateStringsBytes(orExpr) + if exprByteLen == 0 { + return "" + } + + buf := newStringBuilder() + + // Ensure that there is only 1 memory allocation. + size := len(lparen) + len(rparen) + (len(orExpr)-1)*len(opOR) + exprByteLen + buf.Grow(size) + + buf.WriteString(lparen) + buf.WriteStrings(orExpr, opOR) + buf.WriteString(rparen) + return buf.String() +} + +// And is used to construct the expression AND logic like "expr1 AND expr2 AND expr3". +func (c *Cond) And(andExpr ...string) string { + andExpr = filterEmptyStrings(andExpr) + + if len(andExpr) == 0 { + return "" + } + + exprByteLen := estimateStringsBytes(andExpr) + if exprByteLen == 0 { + return "" + } + + buf := newStringBuilder() + + // Ensure that there is only 1 memory allocation. + size := len(lparen) + len(rparen) + (len(andExpr)-1)*len(opAND) + exprByteLen + buf.Grow(size) + + buf.WriteString(lparen) + buf.WriteStrings(andExpr, opAND) + buf.WriteString(rparen) + return buf.String() +} + +// Not is used to construct the expression "NOT expr". +func (c *Cond) Not(notExpr string) string { + if len(notExpr) == 0 { + return "" + } + + buf := newStringBuilder() + + // Ensure that there is only 1 memory allocation. + size := len(opNOT) + len(notExpr) + buf.Grow(size) + + buf.WriteString(opNOT) + buf.WriteString(notExpr) + return buf.String() +} + +// Exists is used to construct the expression "EXISTS (subquery)". +func (c *Cond) Exists(subquery interface{}) string { + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString("EXISTS (") + ctx.WriteValue(subquery) + ctx.WriteString(")") + }, + }) +} + +// NotExists is used to construct the expression "NOT EXISTS (subquery)". 
+func (c *Cond) NotExists(subquery interface{}) string { + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString("NOT EXISTS (") + ctx.WriteValue(subquery) + ctx.WriteString(")") + }, + }) +} + +// Any is used to construct the expression "field op ANY (value...)". +func (c *Cond) Any(field, op string, values ...interface{}) string { + if len(field) == 0 || len(op) == 0 { + return "" + } + + // Empty values means "false". + if len(values) == 0 { + return "0 = 1" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" ") + ctx.WriteString(op) + ctx.WriteString(" ANY (") + ctx.WriteValues(values, ", ") + ctx.WriteString(")") + }, + }) +} + +// All is used to construct the expression "field op ALL (value...)". +func (c *Cond) All(field, op string, values ...interface{}) string { + if len(field) == 0 || len(op) == 0 { + return "" + } + + // Empty values means "false". + if len(values) == 0 { + return "0 = 1" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" ") + ctx.WriteString(op) + ctx.WriteString(" ALL (") + ctx.WriteValues(values, ", ") + ctx.WriteString(")") + }, + }) +} + +// Some is used to construct the expression "field op SOME (value...)". +func (c *Cond) Some(field, op string, values ...interface{}) string { + if len(field) == 0 || len(op) == 0 { + return "" + } + + // Empty values means "false". + if len(values) == 0 { + return "0 = 1" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + ctx.WriteString(field) + ctx.WriteString(" ") + ctx.WriteString(op) + ctx.WriteString(" SOME (") + ctx.WriteValues(values, ", ") + ctx.WriteString(")") + }, + }) +} + +// IsDistinctFrom is used to construct the expression "field IS DISTINCT FROM value". +// +// When the database system does not support the IS DISTINCT FROM operator, +// the NotILike method will return "NOT field <=> value" for MySQL or a +// "CASE ... WHEN ... ELSE ... END" expression to simulate the behavior of +// the IS DISTINCT FROM operator. +func (c *Cond) IsDistinctFrom(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + switch ctx.Flavor { + case PostgreSQL, SQLite, SQLServer: + ctx.WriteString(field) + ctx.WriteString(" IS DISTINCT FROM ") + ctx.WriteValue(value) + + case MySQL: + ctx.WriteString("NOT ") + ctx.WriteString(field) + ctx.WriteString(" <=> ") + ctx.WriteValue(value) + + default: + // CASE + // WHEN field IS NULL AND value IS NULL THEN 0 + // WHEN field IS NOT NULL AND value IS NOT NULL AND field = value THEN 0 + // ELSE 1 + // END = 1 + ctx.WriteString("CASE WHEN ") + ctx.WriteString(field) + ctx.WriteString(" IS NULL AND ") + ctx.WriteValue(value) + ctx.WriteString(" IS NULL THEN 0 WHEN ") + ctx.WriteString(field) + ctx.WriteString(" IS NOT NULL AND ") + ctx.WriteValue(value) + ctx.WriteString(" IS NOT NULL AND ") + ctx.WriteString(field) + ctx.WriteString(" = ") + ctx.WriteValue(value) + ctx.WriteString(" THEN 0 ELSE 1 END = 1") + } + }, + }) +} + +// IsNotDistinctFrom is used to construct the expression "field IS NOT DISTINCT FROM value". +// +// When the database system does not support the IS NOT DISTINCT FROM operator, +// the NotILike method will return "field <=> value" for MySQL or a +// "CASE ... WHEN ... ELSE ... END" expression to simulate the behavior of +// the IS NOT DISTINCT FROM operator. 
+func (c *Cond) IsNotDistinctFrom(field string, value interface{}) string { + if len(field) == 0 { + return "" + } + + return c.Var(condBuilder{ + Builder: func(ctx *argsCompileContext) { + switch ctx.Flavor { + case PostgreSQL, SQLite, SQLServer: + ctx.WriteString(field) + ctx.WriteString(" IS NOT DISTINCT FROM ") + ctx.WriteValue(value) + + case MySQL: + ctx.WriteString(field) + ctx.WriteString(" <=> ") + ctx.WriteValue(value) + + default: + // CASE + // WHEN field IS NULL AND value IS NULL THEN 1 + // WHEN field IS NOT NULL AND value IS NOT NULL AND field = value THEN 1 + // ELSE 0 + // END = 1 + ctx.WriteString("CASE WHEN ") + ctx.WriteString(field) + ctx.WriteString(" IS NULL AND ") + ctx.WriteValue(value) + ctx.WriteString(" IS NULL THEN 1 WHEN ") + ctx.WriteString(field) + ctx.WriteString(" IS NOT NULL AND ") + ctx.WriteValue(value) + ctx.WriteString(" IS NOT NULL AND ") + ctx.WriteString(field) + ctx.WriteString(" = ") + ctx.WriteValue(value) + ctx.WriteString(" THEN 1 ELSE 0 END = 1") + } + }, + }) +} + +// Var returns a placeholder for value. +func (c *Cond) Var(value interface{}) string { + return c.Args.Add(value) +} + +type condBuilder struct { + Builder func(ctx *argsCompileContext) +} + +func estimateStringsBytes(strs []string) (n int) { + for _, s := range strs { + n += len(s) + } + + return +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/createtable.go b/vendor/github.com/huandu/go-sqlbuilder/createtable.go new file mode 100644 index 000000000..31b163dbe --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/createtable.go @@ -0,0 +1,183 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "strings" + + "github.com/huandu/go-clone" +) + +const ( + createTableMarkerInit injectionMarker = iota + createTableMarkerAfterCreate + createTableMarkerAfterDefine + createTableMarkerAfterOption +) + +// NewCreateTableBuilder creates a new CREATE TABLE builder. +func NewCreateTableBuilder() *CreateTableBuilder { + return DefaultFlavor.NewCreateTableBuilder() +} + +func newCreateTableBuilder() *CreateTableBuilder { + args := &Args{} + return &CreateTableBuilder{ + verb: "CREATE TABLE", + args: args, + injection: newInjection(), + marker: createTableMarkerInit, + } +} + +// Clone returns a deep copy of CreateTableBuilder. +// It's useful when you want to create a base builder and clone it to build similar queries. +func (ctb *CreateTableBuilder) Clone() *CreateTableBuilder { + return clone.Clone(ctb).(*CreateTableBuilder) +} + +// CreateTableBuilder is a builder to build CREATE TABLE. +type CreateTableBuilder struct { + verb string + ifNotExists bool + table string + defs [][]string + options [][]string + + args *Args + + injection *injection + marker injectionMarker +} + +var _ Builder = new(CreateTableBuilder) + +// CreateTable sets the table name in CREATE TABLE. +func CreateTable(table string) *CreateTableBuilder { + return DefaultFlavor.NewCreateTableBuilder().CreateTable(table) +} + +// CreateTable sets the table name in CREATE TABLE. +func (ctb *CreateTableBuilder) CreateTable(table string) *CreateTableBuilder { + ctb.table = Escape(table) + ctb.marker = createTableMarkerAfterCreate + return ctb +} + +// CreateTempTable sets the table name and changes the verb of ctb to CREATE TEMPORARY TABLE. 
+func (ctb *CreateTableBuilder) CreateTempTable(table string) *CreateTableBuilder { + ctb.verb = "CREATE TEMPORARY TABLE" + ctb.table = Escape(table) + ctb.marker = createTableMarkerAfterCreate + return ctb +} + +// IfNotExists adds IF NOT EXISTS before table name in CREATE TABLE. +func (ctb *CreateTableBuilder) IfNotExists() *CreateTableBuilder { + ctb.ifNotExists = true + return ctb +} + +// Define adds definition of a column or index in CREATE TABLE. +func (ctb *CreateTableBuilder) Define(def ...string) *CreateTableBuilder { + ctb.defs = append(ctb.defs, def) + ctb.marker = createTableMarkerAfterDefine + return ctb +} + +// Option adds a table option in CREATE TABLE. +func (ctb *CreateTableBuilder) Option(opt ...string) *CreateTableBuilder { + ctb.options = append(ctb.options, opt) + ctb.marker = createTableMarkerAfterOption + return ctb +} + +// NumDefine returns the number of definitions in CREATE TABLE. +func (ctb *CreateTableBuilder) NumDefine() int { + return len(ctb.defs) +} + +// String returns the compiled INSERT string. +func (ctb *CreateTableBuilder) String() string { + s, _ := ctb.Build() + return s +} + +// Build returns compiled CREATE TABLE string and args. +// They can be used in `DB#Query` of package `database/sql` directly. +func (ctb *CreateTableBuilder) Build() (sql string, args []interface{}) { + return ctb.BuildWithFlavor(ctb.args.Flavor) +} + +// BuildWithFlavor returns compiled CREATE TABLE string and args with flavor and initial args. +// They can be used in `DB#Query` of package `database/sql` directly. +func (ctb *CreateTableBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + buf := newStringBuilder() + ctb.injection.WriteTo(buf, createTableMarkerInit) + + if len(ctb.verb) > 0 { + buf.WriteLeadingString(ctb.verb) + } + + if ctb.ifNotExists { + buf.WriteLeadingString("IF NOT EXISTS") + } + + if len(ctb.table) > 0 { + buf.WriteLeadingString(ctb.table) + } + + ctb.injection.WriteTo(buf, createTableMarkerAfterCreate) + + if len(ctb.defs) > 0 { + buf.WriteLeadingString("(") + + defs := make([]string, 0, len(ctb.defs)) + + for _, def := range ctb.defs { + defs = append(defs, strings.Join(def, " ")) + } + + buf.WriteStrings(defs, ", ") + buf.WriteRune(')') + + ctb.injection.WriteTo(buf, createTableMarkerAfterDefine) + } + + if len(ctb.options) > 0 { + opts := make([]string, 0, len(ctb.options)) + + for _, opt := range ctb.options { + opts = append(opts, strings.Join(opt, " ")) + } + + buf.WriteLeadingString(strings.Join(opts, ", ")) + ctb.injection.WriteTo(buf, createTableMarkerAfterOption) + } + + return ctb.args.CompileWithFlavor(buf.String(), flavor, initialArg...) +} + +// SetFlavor sets the flavor of compiled sql. +func (ctb *CreateTableBuilder) SetFlavor(flavor Flavor) (old Flavor) { + old = ctb.args.Flavor + ctb.args.Flavor = flavor + return +} + +// Flavor returns flavor of builder +func (ctb *CreateTableBuilder) Flavor() Flavor { + return ctb.args.Flavor +} + +// Var returns a placeholder for value. +func (ctb *CreateTableBuilder) Var(arg interface{}) string { + return ctb.args.Add(arg) +} + +// SQL adds an arbitrary sql to current position. 
+func (ctb *CreateTableBuilder) SQL(sql string) *CreateTableBuilder { + ctb.injection.SQL(ctb.marker, sql) + return ctb +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/cte.go b/vendor/github.com/huandu/go-sqlbuilder/cte.go new file mode 100644 index 000000000..b32dcfaf6 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/cte.go @@ -0,0 +1,192 @@ +// Copyright 2024 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "reflect" + + "github.com/huandu/go-clone" +) + +const ( + cteMarkerInit injectionMarker = iota + cteMarkerAfterWith +) + +// With creates a new CTE builder with default flavor. +func With(tables ...*CTEQueryBuilder) *CTEBuilder { + return DefaultFlavor.NewCTEBuilder().With(tables...) +} + +// WithRecursive creates a new recursive CTE builder with default flavor. +func WithRecursive(tables ...*CTEQueryBuilder) *CTEBuilder { + return DefaultFlavor.NewCTEBuilder().WithRecursive(tables...) +} + +func newCTEBuilder() *CTEBuilder { + return &CTEBuilder{ + args: &Args{}, + injection: newInjection(), + } +} + +// Clone returns a deep copy of CTEBuilder. +// It's useful when you want to create a base builder and clone it to build similar queries. +func (cteb *CTEBuilder) Clone() *CTEBuilder { + return clone.Clone(cteb).(*CTEBuilder) +} + +func init() { + t := reflect.TypeOf(CTEBuilder{}) + clone.SetCustomFunc(t, func(allocator *clone.Allocator, old, new reflect.Value) { + cloned := allocator.CloneSlowly(old) + new.Set(cloned) + + cteb := cloned.Addr().Interface().(*CTEBuilder) + for i, b := range cteb.queries { + cteb.args.Replace(cteb.queryBuilderVars[i], b) + } + }) +} + +// CTEBuilder is a CTE (Common Table Expression) builder. +type CTEBuilder struct { + recursive bool + queries []*CTEQueryBuilder + queryBuilderVars []string + + args *Args + + injection *injection + marker injectionMarker +} + +var _ Builder = new(CTEBuilder) + +// With sets the CTE name and columns. +func (cteb *CTEBuilder) With(queries ...*CTEQueryBuilder) *CTEBuilder { + queryBuilderVars := make([]string, 0, len(queries)) + + for _, query := range queries { + queryBuilderVars = append(queryBuilderVars, cteb.args.Add(query)) + } + + cteb.queries = append([]*CTEQueryBuilder(nil), queries...) + cteb.queryBuilderVars = queryBuilderVars + cteb.marker = cteMarkerAfterWith + return cteb +} + +// WithRecursive sets the CTE name and columns and turns on the RECURSIVE keyword. +func (cteb *CTEBuilder) WithRecursive(queries ...*CTEQueryBuilder) *CTEBuilder { + cteb.With(queries...).recursive = true + return cteb +} + +// Select creates a new SelectBuilder to build a SELECT statement using this CTE. +func (cteb *CTEBuilder) Select(col ...string) *SelectBuilder { + sb := cteb.args.Flavor.NewSelectBuilder() + return sb.With(cteb).Select(col...) +} + +// DeleteFrom creates a new DeleteBuilder to build a DELETE statement using this CTE. +func (cteb *CTEBuilder) DeleteFrom(table string) *DeleteBuilder { + db := cteb.args.Flavor.NewDeleteBuilder() + return db.With(cteb).DeleteFrom(table) +} + +// Update creates a new UpdateBuilder to build an UPDATE statement using this CTE. +func (cteb *CTEBuilder) Update(table string) *UpdateBuilder { + ub := cteb.args.Flavor.NewUpdateBuilder() + return ub.With(cteb).Update(table) +} + +// String returns the compiled CTE string. 
+// String returns the compiled CTE string.
+func (cteb *CTEBuilder) String() string {
+	sql, _ := cteb.Build()
+	return sql
+}
+
+// Build returns compiled CTE string and args.
+func (cteb *CTEBuilder) Build() (sql string, args []interface{}) {
+	return cteb.BuildWithFlavor(cteb.args.Flavor)
+}
+
+// BuildWithFlavor builds a CTE with the specified flavor and initial arguments.
+func (cteb *CTEBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) {
+	buf := newStringBuilder()
+	cteb.injection.WriteTo(buf, cteMarkerInit)
+
+	if len(cteb.queryBuilderVars) > 0 {
+		buf.WriteLeadingString("WITH ")
+		if cteb.recursive {
+			buf.WriteString("RECURSIVE ")
+		}
+		buf.WriteStrings(cteb.queryBuilderVars, ", ")
+	}
+
+	cteb.injection.WriteTo(buf, cteMarkerAfterWith)
+	return cteb.args.CompileWithFlavor(buf.String(), flavor, initialArg...)
+}
+
+// SetFlavor sets the flavor of compiled sql.
+func (cteb *CTEBuilder) SetFlavor(flavor Flavor) (old Flavor) {
+	old = cteb.args.Flavor
+	cteb.args.Flavor = flavor
+	return
+}
+
+// Flavor returns flavor of builder
+func (cteb *CTEBuilder) Flavor() Flavor {
+	return cteb.args.Flavor
+}
+
+// SQL adds an arbitrary sql to current position.
+func (cteb *CTEBuilder) SQL(sql string) *CTEBuilder {
+	cteb.injection.SQL(cteb.marker, sql)
+	return cteb
+}
+
+// TableNames returns all table names in a CTE.
+func (cteb *CTEBuilder) TableNames() []string {
+	if len(cteb.queryBuilderVars) == 0 {
+		return nil
+	}
+
+	tableNames := make([]string, 0, len(cteb.queries))
+
+	for _, query := range cteb.queries {
+		tableNames = append(tableNames, query.TableName())
+	}
+
+	return tableNames
+}
+
+// tableNamesForFrom returns a list of table names which should be automatically added to FROM clause.
+// It's not public, as this feature is designed only for SelectBuilder/UpdateBuilder/DeleteBuilder right now.
+func (cteb *CTEBuilder) tableNamesForFrom() []string {
+	cnt := 0
+
+	// ShouldAddToTableList() is unlikely to return true.
+	// Count it before allocating any memory for better performance.
+	for _, query := range cteb.queries {
+		if query.ShouldAddToTableList() {
+			cnt++
+		}
+	}
+
+	if cnt == 0 {
+		return nil
+	}
+
+	tableNames := make([]string, 0, cnt)
+
+	for _, query := range cteb.queries {
+		if query.ShouldAddToTableList() {
+			tableNames = append(tableNames, query.TableName())
+		}
+	}
+
+	return tableNames
+}
diff --git a/vendor/github.com/huandu/go-sqlbuilder/ctequery.go b/vendor/github.com/huandu/go-sqlbuilder/ctequery.go
new file mode 100644
index 000000000..1663c0bef
--- /dev/null
+++ b/vendor/github.com/huandu/go-sqlbuilder/ctequery.go
@@ -0,0 +1,166 @@
+// Copyright 2024 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package sqlbuilder
+
+import (
+	"reflect"
+
+	"github.com/huandu/go-clone"
+)
+
+const (
+	cteQueryMarkerInit injectionMarker = iota
+	cteQueryMarkerAfterTable
+	cteQueryMarkerAfterAs
+)
+
+// CTETable creates a new CTE query builder with default flavor, marking it as a table.
+//
+// The resulting CTE query can be used in a `SelectBuilder`, where its table name will be
+// automatically included in the FROM clause.
+func CTETable(name string, cols ...string) *CTEQueryBuilder {
+	return DefaultFlavor.NewCTEQueryBuilder().AddToTableList().Table(name, cols...)
+}
+
+// CTEQuery creates a new CTE query builder with default flavor.
+func CTEQuery(name string, cols ...string) *CTEQueryBuilder { + return DefaultFlavor.NewCTEQueryBuilder().Table(name, cols...) +} + +func newCTEQueryBuilder() *CTEQueryBuilder { + return &CTEQueryBuilder{ + args: &Args{}, + injection: newInjection(), + } +} + +// Clone returns a deep copy of CTEQueryBuilder. +// It's useful when you want to create a base builder and clone it to build similar queries. +func (ctetb *CTEQueryBuilder) Clone() *CTEQueryBuilder { + return clone.Clone(ctetb).(*CTEQueryBuilder) +} + +func init() { + t := reflect.TypeOf(CTEQueryBuilder{}) + clone.SetCustomFunc(t, func(allocator *clone.Allocator, old, new reflect.Value) { + cloned := allocator.CloneSlowly(old) + new.Set(cloned) + + ctetb := cloned.Addr().Interface().(*CTEQueryBuilder) + ctetb.args.Replace(ctetb.builderVar, ctetb.builder) + }) +} + +// CTEQueryBuilder is a builder to build one table in CTE (Common Table Expression). +type CTEQueryBuilder struct { + name string + cols []string + builder Builder + builderVar string + + // if true, this query's table name will be automatically added to the table list + // in FROM clause of SELECT statement. + autoAddToTableList bool + + args *Args + + injection *injection + marker injectionMarker +} + +var _ Builder = new(CTEQueryBuilder) + +// CTETableBuilder is an alias of CTEQueryBuilder for backward compatibility. +// +// Deprecated: use CTEQueryBuilder instead. +type CTETableBuilder = CTEQueryBuilder + +// Table sets the table name and columns in a CTE table. +func (ctetb *CTEQueryBuilder) Table(name string, cols ...string) *CTEQueryBuilder { + ctetb.name = name + ctetb.cols = cols + ctetb.marker = cteQueryMarkerAfterTable + return ctetb +} + +// As sets the builder to select data. +func (ctetb *CTEQueryBuilder) As(builder Builder) *CTEQueryBuilder { + ctetb.builder = builder + ctetb.builderVar = ctetb.args.Add(builder) + ctetb.marker = cteQueryMarkerAfterAs + return ctetb +} + +// AddToTableList sets flag to add table name to table list in FROM clause of SELECT statement. +func (ctetb *CTEQueryBuilder) AddToTableList() *CTEQueryBuilder { + ctetb.autoAddToTableList = true + return ctetb +} + +// ShouldAddToTableList returns flag to add table name to table list in FROM clause of SELECT statement. +func (ctetb *CTEQueryBuilder) ShouldAddToTableList() bool { + return ctetb.autoAddToTableList +} + +// String returns the compiled CTE string. +func (ctetb *CTEQueryBuilder) String() string { + sql, _ := ctetb.Build() + return sql +} + +// Build returns compiled CTE string and args. +func (ctetb *CTEQueryBuilder) Build() (sql string, args []interface{}) { + return ctetb.BuildWithFlavor(ctetb.args.Flavor) +} + +// BuildWithFlavor builds a CTE with the specified flavor and initial arguments. +func (ctetb *CTEQueryBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + buf := newStringBuilder() + ctetb.injection.WriteTo(buf, cteQueryMarkerInit) + + if ctetb.name != "" { + buf.WriteLeadingString(ctetb.name) + + if len(ctetb.cols) > 0 { + buf.WriteLeadingString("(") + buf.WriteStrings(ctetb.cols, ", ") + buf.WriteString(")") + } + + ctetb.injection.WriteTo(buf, cteQueryMarkerAfterTable) + } + + if ctetb.builderVar != "" { + buf.WriteLeadingString("AS (") + buf.WriteString(ctetb.builderVar) + buf.WriteRune(')') + + ctetb.injection.WriteTo(buf, cteQueryMarkerAfterAs) + } + + return ctetb.args.CompileWithFlavor(buf.String(), flavor, initialArg...) +} + +// SetFlavor sets the flavor of compiled sql. 
+func (ctetb *CTEQueryBuilder) SetFlavor(flavor Flavor) (old Flavor) { + old = ctetb.args.Flavor + ctetb.args.Flavor = flavor + return +} + +// Flavor returns flavor of builder +func (ctetb *CTEQueryBuilder) Flavor() Flavor { + return ctetb.args.Flavor +} + +// SQL adds an arbitrary sql to current position. +func (ctetb *CTEQueryBuilder) SQL(sql string) *CTEQueryBuilder { + ctetb.injection.SQL(ctetb.marker, sql) + return ctetb +} + +// TableName returns the CTE table name. +func (ctetb *CTEQueryBuilder) TableName() string { + return ctetb.name +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/delete.go b/vendor/github.com/huandu/go-sqlbuilder/delete.go new file mode 100644 index 000000000..a96295bfb --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/delete.go @@ -0,0 +1,283 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "reflect" + + "github.com/huandu/go-clone" +) + +const ( + deleteMarkerInit injectionMarker = iota + deleteMarkerAfterWith + deleteMarkerAfterDeleteFrom + deleteMarkerAfterWhere + deleteMarkerAfterOrderBy + deleteMarkerAfterLimit + deleteMarkerAfterReturning +) + +// NewDeleteBuilder creates a new DELETE builder. +func NewDeleteBuilder() *DeleteBuilder { + return DefaultFlavor.NewDeleteBuilder() +} + +func newDeleteBuilder() *DeleteBuilder { + args := &Args{} + proxy := &whereClauseProxy{} + return &DeleteBuilder{ + whereClauseProxy: proxy, + whereClauseExpr: args.Add(proxy), + + Cond: Cond{ + Args: args, + }, + args: args, + injection: newInjection(), + } +} + +// Clone returns a deep copy of DeleteBuilder. +// It's useful when you want to create a base builder and clone it to build similar queries. +func (db *DeleteBuilder) Clone() *DeleteBuilder { + return clone.Clone(db).(*DeleteBuilder) +} + +func init() { + t := reflect.TypeOf(DeleteBuilder{}) + clone.SetCustomFunc(t, func(allocator *clone.Allocator, old, new reflect.Value) { + cloned := allocator.CloneSlowly(old) + new.Set(cloned) + + db := cloned.Addr().Interface().(*DeleteBuilder) + db.args.Replace(db.whereClauseExpr, db.whereClauseProxy) + db.args.Replace(db.cteBuilderVar, db.cteBuilder) + }) +} + +// DeleteBuilder is a builder to build DELETE. +type DeleteBuilder struct { + *WhereClause + Cond + + whereClauseProxy *whereClauseProxy + whereClauseExpr string + + cteBuilderVar string + cteBuilder *CTEBuilder + + tables []string + orderByCols []string + order string + limitVar string + returning []string + + args *Args + + injection *injection + marker injectionMarker +} + +var _ Builder = new(DeleteBuilder) + +// DeleteFrom sets table name in DELETE. +func DeleteFrom(table ...string) *DeleteBuilder { + return DefaultFlavor.NewDeleteBuilder().DeleteFrom(table...) +} + +// With sets WITH clause (the Common Table Expression) before DELETE. +func (db *DeleteBuilder) With(builder *CTEBuilder) *DeleteBuilder { + db.marker = deleteMarkerAfterWith + db.cteBuilderVar = db.Var(builder) + db.cteBuilder = builder + return db +} + +// DeleteFrom sets table name in DELETE. +func (db *DeleteBuilder) DeleteFrom(table ...string) *DeleteBuilder { + db.tables = table + db.marker = deleteMarkerAfterDeleteFrom + return db +} + +// TableNames returns all table names in this DELETE statement. 
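+//
+// For reference, an illustrative sketch of the whole builder in use
+// (GreaterThan comes from Cond in cond.go):
+//
+//	db := DeleteFrom("demo.user")
+//	db.Where(db.GreaterThan("id", 1234)).Limit(10)
+//	sql, args := db.Build()
+//
+//	// DELETE FROM demo.user WHERE id > ? LIMIT ?
+//	// args: [1234 10]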
+func (db *DeleteBuilder) TableNames() []string {
+	var additionalTableNames []string
+
+	if db.cteBuilder != nil {
+		additionalTableNames = db.cteBuilder.tableNamesForFrom()
+	}
+
+	var tableNames []string
+
+	if len(db.tables) > 0 && len(additionalTableNames) > 0 {
+		tableNames = make([]string, len(db.tables)+len(additionalTableNames))
+		copy(tableNames, db.tables)
+		copy(tableNames[len(db.tables):], additionalTableNames)
+	} else if len(db.tables) > 0 {
+		tableNames = db.tables
+	} else if len(additionalTableNames) > 0 {
+		tableNames = additionalTableNames
+	}
+
+	return tableNames
+}
+
+// Where sets expressions of WHERE in DELETE.
+func (db *DeleteBuilder) Where(andExpr ...string) *DeleteBuilder {
+	if len(andExpr) == 0 || estimateStringsBytes(andExpr) == 0 {
+		return db
+	}
+
+	if db.WhereClause == nil {
+		db.WhereClause = NewWhereClause()
+	}
+
+	db.WhereClause.AddWhereExpr(db.args, andExpr...)
+	db.marker = deleteMarkerAfterWhere
+	return db
+}
+
+// AddWhereClause adds all clauses in the whereClause to DELETE.
+func (db *DeleteBuilder) AddWhereClause(whereClause *WhereClause) *DeleteBuilder {
+	if db.WhereClause == nil {
+		db.WhereClause = NewWhereClause()
+	}
+
+	db.WhereClause.AddWhereClause(whereClause)
+	return db
+}
+
+// OrderBy sets columns of ORDER BY in DELETE.
+func (db *DeleteBuilder) OrderBy(col ...string) *DeleteBuilder {
+	db.orderByCols = col
+	db.marker = deleteMarkerAfterOrderBy
+	return db
+}
+
+// Asc sets order of ORDER BY to ASC.
+func (db *DeleteBuilder) Asc() *DeleteBuilder {
+	db.order = "ASC"
+	db.marker = deleteMarkerAfterOrderBy
+	return db
+}
+
+// Desc sets order of ORDER BY to DESC.
+func (db *DeleteBuilder) Desc() *DeleteBuilder {
+	db.order = "DESC"
+	db.marker = deleteMarkerAfterOrderBy
+	return db
+}
+
+// Limit sets the LIMIT in DELETE.
+func (db *DeleteBuilder) Limit(limit int) *DeleteBuilder {
+	if limit < 0 {
+		db.limitVar = ""
+		return db
+	}
+
+	db.limitVar = db.Var(limit)
+	db.marker = deleteMarkerAfterLimit
+	return db
+}
+
+// Returning sets returning columns.
+// For DBMS that doesn't support RETURNING, e.g. MySQL, it will be ignored.
+func (db *DeleteBuilder) Returning(col ...string) *DeleteBuilder {
+	db.returning = col
+	db.marker = deleteMarkerAfterReturning
+	return db
+}
+
+// String returns the compiled DELETE string.
+func (db *DeleteBuilder) String() string {
+	s, _ := db.Build()
+	return s
+}
+
+// Build returns compiled DELETE string and args.
+// They can be used in `DB#Query` of package `database/sql` directly.
+func (db *DeleteBuilder) Build() (sql string, args []interface{}) {
+	return db.BuildWithFlavor(db.args.Flavor)
+}
+
+// BuildWithFlavor returns compiled DELETE string and args with flavor and initial args.
+// They can be used in `DB#Query` of package `database/sql` directly.
+func (db *DeleteBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + buf := newStringBuilder() + db.injection.WriteTo(buf, deleteMarkerInit) + + if db.cteBuilder != nil { + buf.WriteLeadingString(db.cteBuilderVar) + db.injection.WriteTo(buf, deleteMarkerAfterWith) + } + + tableNames := db.TableNames() + + if len(tableNames) > 0 { + buf.WriteLeadingString("DELETE FROM ") + buf.WriteStrings(tableNames, ", ") + } + + db.injection.WriteTo(buf, deleteMarkerAfterDeleteFrom) + + if db.WhereClause != nil { + db.whereClauseProxy.WhereClause = db.WhereClause + defer func() { + db.whereClauseProxy.WhereClause = nil + }() + + buf.WriteLeadingString(db.whereClauseExpr) + db.injection.WriteTo(buf, deleteMarkerAfterWhere) + } + + if len(db.orderByCols) > 0 { + buf.WriteLeadingString("ORDER BY ") + buf.WriteStrings(db.orderByCols, ", ") + + if db.order != "" { + buf.WriteRune(' ') + buf.WriteString(db.order) + } + + db.injection.WriteTo(buf, deleteMarkerAfterOrderBy) + } + + if len(db.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(db.limitVar) + + db.injection.WriteTo(buf, deleteMarkerAfterLimit) + } + + if flavor == PostgreSQL || flavor == SQLite { + if len(db.returning) > 0 { + buf.WriteLeadingString("RETURNING ") + buf.WriteStrings(db.returning, ", ") + } + + db.injection.WriteTo(buf, deleteMarkerAfterReturning) + } + + return db.args.CompileWithFlavor(buf.String(), flavor, initialArg...) +} + +// SetFlavor sets the flavor of compiled sql. +func (db *DeleteBuilder) SetFlavor(flavor Flavor) (old Flavor) { + old = db.args.Flavor + db.args.Flavor = flavor + return +} + +// Flavor returns flavor of builder +func (db *DeleteBuilder) Flavor() Flavor { + return db.args.Flavor +} + +// SQL adds an arbitrary sql to current position. +func (db *DeleteBuilder) SQL(sql string) *DeleteBuilder { + db.injection.SQL(db.marker, sql) + return db +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/doc.go b/vendor/github.com/huandu/go-sqlbuilder/doc.go new file mode 100644 index 000000000..44cc2a5f6 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/doc.go @@ -0,0 +1,5 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +// Package sqlbuilder is a flexible and powerful tool to build SQL string and associated args. +package sqlbuilder diff --git a/vendor/github.com/huandu/go-sqlbuilder/fieldmapper.go b/vendor/github.com/huandu/go-sqlbuilder/fieldmapper.go new file mode 100644 index 000000000..4293ab4b0 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/fieldmapper.go @@ -0,0 +1,46 @@ +package sqlbuilder + +import ( + "reflect" + + "github.com/huandu/xstrings" +) + +var ( + // DefaultFieldMapper is the default field name to table column name mapper func. + // It's nil by default which means field name will be kept as it is. + // + // If a Struct has its own mapper func, the DefaultFieldMapper is ignored in this Struct. + // Field tag has precedence over all kinds of field mapper functions. + // + // Field mapper is called only once on a Struct when the Struct is used to create builder for the first time. 
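+	//
+	// For example, with DefaultFieldMapper set to SnakeCaseMapper (defined
+	// below), a struct field named "UserID" is mapped to column "user_id".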
+ DefaultFieldMapper FieldMapperFunc + + // DefaultGetAlias is the default alias and dbtag get func + DefaultGetAlias GetAliasFunc +) + +func init() { + DefaultGetAlias = func(field *reflect.StructField) (alias string, dbtag string) { + dbtag = field.Tag.Get(DBTag) + alias = dbtag + return + } +} + +// FieldMapperFunc is a func to map struct field names to column names, +// which will be used in query as columns. +type FieldMapperFunc func(name string) string + +// SnakeCaseMapper is a field mapper which can convert field name from CamelCase to snake_case. +// +// For instance, it will convert "MyField" to "my_field". +// +// SnakeCaseMapper uses package "xstrings" to do the conversion. +// See https://pkg.go.dev/github.com/huandu/xstrings#ToSnakeCase for conversion rules. +func SnakeCaseMapper(field string) string { + return xstrings.ToSnakeCase(field) +} + +// GetAliasFunc is a func to get alias and dbtag +type GetAliasFunc func(field *reflect.StructField) (alias string, dbtag string) diff --git a/vendor/github.com/huandu/go-sqlbuilder/flavor.go b/vendor/github.com/huandu/go-sqlbuilder/flavor.go new file mode 100644 index 000000000..26c8c283a --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/flavor.go @@ -0,0 +1,210 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "errors" + "fmt" +) + +// Supported flavors. +const ( + invalidFlavor Flavor = iota + + MySQL + PostgreSQL + SQLite + SQLServer + CQL + ClickHouse + Presto + Oracle + Informix + Doris +) + +var ( + // DefaultFlavor is the default flavor for all builders. + DefaultFlavor = MySQL +) + +var ( + // ErrInterpolateNotImplemented means the method or feature is not implemented right now. + ErrInterpolateNotImplemented = errors.New("go-sqlbuilder: interpolation for this flavor is not implemented") + + // ErrInterpolateMissingArgs means there are some args missing in query, so it's not possible to + // prepare a query with such args. + ErrInterpolateMissingArgs = errors.New("go-sqlbuilder: not enough args when interpolating") + + // ErrInterpolateUnsupportedArgs means that some types of the args are not supported. + ErrInterpolateUnsupportedArgs = errors.New("go-sqlbuilder: unsupported args when interpolating") +) + +// Flavor is the flag to control the format of compiled sql. +type Flavor int + +// String returns the name of f. +func (f Flavor) String() string { + switch f { + case MySQL: + return "MySQL" + case PostgreSQL: + return "PostgreSQL" + case SQLite: + return "SQLite" + case SQLServer: + return "SQLServer" + case CQL: + return "CQL" + case ClickHouse: + return "ClickHouse" + case Presto: + return "Presto" + case Oracle: + return "Oracle" + case Informix: + return "Informix" + case Doris: + return "Doris" + } + + return "" +} + +// Interpolate parses sql returned by `Args#Compile` or `Builder`, +// and interpolate args to replace placeholders in the sql. +// +// If there are some args missing in sql, e.g. the number of placeholders are larger than len(args), +// returns ErrMissingArgs error. +func (f Flavor) Interpolate(sql string, args []interface{}) (string, error) { + switch f { + case MySQL: + return mysqlInterpolate(sql, args...) + case PostgreSQL: + return postgresqlInterpolate(sql, args...) + case SQLite: + return sqliteInterpolate(sql, args...) + case SQLServer: + return sqlserverInterpolate(sql, args...) 
+	case CQL:
+		return cqlInterpolate(sql, args...)
+	case ClickHouse:
+		return clickhouseInterpolate(sql, args...)
+	case Presto:
+		return prestoInterpolate(sql, args...)
+	case Oracle:
+		return oracleInterpolate(sql, args...)
+	case Informix:
+		return informixInterpolate(sql, args...)
+	case Doris:
+		return dorisInterpolate(sql, args...)
+	}
+
+	return "", ErrInterpolateNotImplemented
+}
+
+// NewCreateTableBuilder creates a new CREATE TABLE builder with flavor.
+func (f Flavor) NewCreateTableBuilder() *CreateTableBuilder {
+	b := newCreateTableBuilder()
+	b.SetFlavor(f)
+	return b
+}
+
+// NewDeleteBuilder creates a new DELETE builder with flavor.
+func (f Flavor) NewDeleteBuilder() *DeleteBuilder {
+	b := newDeleteBuilder()
+	b.SetFlavor(f)
+	return b
+}
+
+// NewInsertBuilder creates a new INSERT builder with flavor.
+func (f Flavor) NewInsertBuilder() *InsertBuilder {
+	b := newInsertBuilder()
+	b.SetFlavor(f)
+	return b
+}
+
+// NewSelectBuilder creates a new SELECT builder with flavor.
+func (f Flavor) NewSelectBuilder() *SelectBuilder {
+	b := newSelectBuilder()
+	b.SetFlavor(f)
+	return b
+}
+
+// NewUpdateBuilder creates a new UPDATE builder with flavor.
+func (f Flavor) NewUpdateBuilder() *UpdateBuilder {
+	b := newUpdateBuilder()
+	b.SetFlavor(f)
+	return b
+}
+
+// NewUnionBuilder creates a new UNION builder with flavor.
+func (f Flavor) NewUnionBuilder() *UnionBuilder {
+	b := newUnionBuilder()
+	b.SetFlavor(f)
+	return b
+}
+
+// NewCTEBuilder creates a new CTE builder with flavor.
+func (f Flavor) NewCTEBuilder() *CTEBuilder {
+	b := newCTEBuilder()
+	b.SetFlavor(f)
+	return b
+}
+
+// NewCTEQueryBuilder creates a new CTE query builder with flavor.
+func (f Flavor) NewCTEQueryBuilder() *CTEQueryBuilder {
+	b := newCTEQueryBuilder()
+	b.SetFlavor(f)
+	return b
+}
+
+// Quote adds quote for name to make sure the name can be used safely
+// as table name or field name.
+//
+// - For MySQL, ClickHouse and Doris, use back quote (`) to quote name;
+// - For PostgreSQL, SQL Server, SQLite, Presto, Oracle and Informix, use double quote (") to quote name;
+// - For CQL, use single quote (') to quote name.
+func (f Flavor) Quote(name string) string {
+	switch f {
+	case MySQL, ClickHouse, Doris:
+		return fmt.Sprintf("`%s`", name)
+	case PostgreSQL, SQLServer, SQLite, Presto, Oracle, Informix:
+		return fmt.Sprintf(`"%s"`, name)
+	case CQL:
+		return fmt.Sprintf("'%s'", name)
+	}
+
+	return name
+}
+
+// PrepareInsertIgnore prepares the insert builder to build insert ignore SQL statement based on the sql flavor.
+func (f Flavor) PrepareInsertIgnore(table string, ib *InsertBuilder) {
+	switch ib.args.Flavor {
+	case MySQL, Oracle:
+		ib.verb = "INSERT IGNORE"
+
+	case PostgreSQL:
+		// see https://www.postgresql.org/docs/current/sql-insert.html
+		ib.verb = "INSERT"
+		// add sql statement at the end after values, i.e. INSERT INTO ...
ON CONFLICT DO NOTHING + ib.marker = insertMarkerAfterValues + ib.SQL("ON CONFLICT DO NOTHING") + + case SQLite: + // see https://www.sqlite.org/lang_insert.html + ib.verb = "INSERT OR IGNORE" + + case ClickHouse, CQL, SQLServer, Presto, Informix, Doris: + // All other databases do not support insert ignore + ib.verb = "INSERT" + + default: + // panic if the db flavor is not supported + panic(fmt.Errorf("unsupported db flavor: %s", ib.args.Flavor.String())) + } + + // Set the table and reset the marker right after insert into + ib.table = Escape(table) + ib.marker = insertMarkerAfterInsertInto +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/injection.go b/vendor/github.com/huandu/go-sqlbuilder/injection.go new file mode 100644 index 000000000..9df2745c3 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/injection.go @@ -0,0 +1,37 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +// injection is a helper type to manage injected SQLs in all builders. +type injection struct { + markerSQLs map[injectionMarker][]string +} + +type injectionMarker int + +// newInjection creates a new injection. +func newInjection() *injection { + return &injection{ + markerSQLs: map[injectionMarker][]string{}, + } +} + +// SQL adds sql to injection's sql list. +// All sqls inside injection is ordered by marker in ascending order. +func (injection *injection) SQL(marker injectionMarker, sql string) { + injection.markerSQLs[marker] = append(injection.markerSQLs[marker], sql) +} + +// WriteTo joins all SQL strings at the same marker value with blank (" ") +// and writes the joined value to buf. +func (injection *injection) WriteTo(buf *stringBuilder, marker injectionMarker) { + sqls := injection.markerSQLs[marker] + + if len(sqls) == 0 { + return + } + + buf.WriteLeadingString("") + buf.WriteStrings(sqls, " ") +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/insert.go b/vendor/github.com/huandu/go-sqlbuilder/insert.go new file mode 100644 index 000000000..769813934 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/insert.go @@ -0,0 +1,254 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "fmt" + "strings" + + "github.com/huandu/go-clone" +) + +const ( + insertMarkerInit injectionMarker = iota + insertMarkerAfterInsertInto + insertMarkerAfterCols + insertMarkerAfterValues + insertMarkerAfterSelect + insertMarkerAfterReturning +) + +// NewInsertBuilder creates a new INSERT builder. +func NewInsertBuilder() *InsertBuilder { + return DefaultFlavor.NewInsertBuilder() +} + +func newInsertBuilder() *InsertBuilder { + args := &Args{} + return &InsertBuilder{ + verb: "INSERT", + args: args, + injection: newInjection(), + } +} + +// Clone returns a deep copy of InsertBuilder. +// It's useful when you want to create a base builder and clone it to build similar queries. +func (ib *InsertBuilder) Clone() *InsertBuilder { + return clone.Clone(ib).(*InsertBuilder) +} + +// InsertBuilder is a builder to build INSERT. 
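+//
+// An illustrative sketch, showing how PrepareInsertIgnore from flavor.go
+// shapes the result under the PostgreSQL flavor:
+//
+//	ib := PostgreSQL.NewInsertBuilder().InsertIgnoreInto("orders")
+//	ib.Cols("id", "total").Values(42, 19.99)
+//	sql, args := ib.Build()
+//
+//	// INSERT INTO orders (id, total) VALUES ($1, $2) ON CONFLICT DO NOTHING
+//	// args: [42 19.99]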
+type InsertBuilder struct { + verb string + table string + cols []string + values [][]string + returning []string + + args *Args + + injection *injection + marker injectionMarker + + sbHolder string +} + +var _ Builder = new(InsertBuilder) + +// InsertInto sets table name in INSERT. +func InsertInto(table string) *InsertBuilder { + return DefaultFlavor.NewInsertBuilder().InsertInto(table) +} + +// InsertInto sets table name in INSERT. +func (ib *InsertBuilder) InsertInto(table string) *InsertBuilder { + ib.table = Escape(table) + ib.marker = insertMarkerAfterInsertInto + return ib +} + +// InsertIgnoreInto sets table name in INSERT IGNORE. +func InsertIgnoreInto(table string) *InsertBuilder { + return DefaultFlavor.NewInsertBuilder().InsertIgnoreInto(table) +} + +// InsertIgnoreInto sets table name in INSERT IGNORE. +func (ib *InsertBuilder) InsertIgnoreInto(table string) *InsertBuilder { + ib.args.Flavor.PrepareInsertIgnore(table, ib) + return ib +} + +// ReplaceInto sets table name and changes the verb of ib to REPLACE. +// REPLACE INTO is a MySQL extension to the SQL standard. +func ReplaceInto(table string) *InsertBuilder { + return DefaultFlavor.NewInsertBuilder().ReplaceInto(table) +} + +// ReplaceInto sets table name and changes the verb of ib to REPLACE. +// REPLACE INTO is a MySQL extension to the SQL standard. +func (ib *InsertBuilder) ReplaceInto(table string) *InsertBuilder { + ib.verb = "REPLACE" + ib.table = Escape(table) + ib.marker = insertMarkerAfterInsertInto + return ib +} + +// Cols sets columns in INSERT. +func (ib *InsertBuilder) Cols(col ...string) *InsertBuilder { + ib.cols = EscapeAll(col...) + ib.marker = insertMarkerAfterCols + return ib +} + +// Select returns a new SelectBuilder to build a SELECT statement inside the INSERT INTO. +func (isb *InsertBuilder) Select(col ...string) *SelectBuilder { + sb := Select(col...) + isb.sbHolder = isb.args.Add(sb) + return sb +} + +// Values adds a list of values for a row in INSERT. +func (ib *InsertBuilder) Values(value ...interface{}) *InsertBuilder { + placeholders := make([]string, 0, len(value)) + + for _, v := range value { + placeholders = append(placeholders, ib.args.Add(v)) + } + + ib.values = append(ib.values, placeholders) + ib.marker = insertMarkerAfterValues + return ib +} + +// Returning sets returning columns. +// For DBMS that doesn't support RETURNING, e.g. MySQL, it will be ignored. +func (ib *InsertBuilder) Returning(col ...string) *InsertBuilder { + ib.returning = col + ib.marker = insertMarkerAfterReturning + return ib +} + +// NumValue returns the number of values to insert. +func (ib *InsertBuilder) NumValue() int { + return len(ib.values) +} + +// String returns the compiled INSERT string. +func (ib *InsertBuilder) String() string { + s, _ := ib.Build() + return s +} + +// Build returns compiled INSERT string and args. +// They can be used in `DB#Query` of package `database/sql` directly. +func (ib *InsertBuilder) Build() (sql string, args []interface{}) { + return ib.BuildWithFlavor(ib.args.Flavor) +} + +// BuildWithFlavor returns compiled INSERT string and args with flavor and initial args. +// They can be used in `DB#Query` of package `database/sql` directly. 
+func (ib *InsertBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + buf := newStringBuilder() + ib.injection.WriteTo(buf, insertMarkerInit) + + if len(ib.values) > 1 && ib.args.Flavor == Oracle { + buf.WriteLeadingString(ib.verb) + buf.WriteString(" ALL") + + for _, v := range ib.values { + if len(ib.table) > 0 { + buf.WriteString(" INTO ") + buf.WriteString(ib.table) + } + ib.injection.WriteTo(buf, insertMarkerAfterInsertInto) + if len(ib.cols) > 0 { + buf.WriteLeadingString("(") + buf.WriteStrings(ib.cols, ", ") + buf.WriteString(")") + + ib.injection.WriteTo(buf, insertMarkerAfterCols) + } + + buf.WriteLeadingString("VALUES ") + values := make([]string, 0, len(ib.values)) + values = append(values, fmt.Sprintf("(%v)", strings.Join(v, ", "))) + buf.WriteStrings(values, ", ") + } + + buf.WriteString(" SELECT 1 from DUAL") + + ib.injection.WriteTo(buf, insertMarkerAfterValues) + + return ib.args.CompileWithFlavor(buf.String(), flavor, initialArg...) + } + + if len(ib.table) > 0 { + buf.WriteLeadingString(ib.verb) + buf.WriteString(" INTO ") + buf.WriteString(ib.table) + } + + ib.injection.WriteTo(buf, insertMarkerAfterInsertInto) + + if len(ib.cols) > 0 { + buf.WriteLeadingString("(") + buf.WriteStrings(ib.cols, ", ") + buf.WriteString(")") + + ib.injection.WriteTo(buf, insertMarkerAfterCols) + } + + if ib.sbHolder != "" { + buf.WriteString(" ") + buf.WriteString(ib.sbHolder) + + ib.injection.WriteTo(buf, insertMarkerAfterSelect) + } else if len(ib.values) > 0 { + buf.WriteLeadingString("VALUES ") + values := make([]string, 0, len(ib.values)) + + for _, v := range ib.values { + values = append(values, fmt.Sprintf("(%v)", strings.Join(v, ", "))) + } + + buf.WriteStrings(values, ", ") + } + + ib.injection.WriteTo(buf, insertMarkerAfterValues) + + if flavor == PostgreSQL || flavor == SQLite { + if len(ib.returning) > 0 { + buf.WriteLeadingString("RETURNING ") + buf.WriteStrings(ib.returning, ", ") + } + + ib.injection.WriteTo(buf, insertMarkerAfterReturning) + } + + return ib.args.CompileWithFlavor(buf.String(), flavor, initialArg...) +} + +// SetFlavor sets the flavor of compiled sql. +func (ib *InsertBuilder) SetFlavor(flavor Flavor) (old Flavor) { + old = ib.args.Flavor + ib.args.Flavor = flavor + return +} + +// Flavor returns flavor of builder +func (ib *InsertBuilder) Flavor() Flavor { + return ib.args.Flavor +} + +// Var returns a placeholder for value. +func (ib *InsertBuilder) Var(arg interface{}) string { + return ib.args.Add(arg) +} + +// SQL adds an arbitrary sql to current position. +func (ib *InsertBuilder) SQL(sql string) *InsertBuilder { + ib.injection.SQL(ib.marker, sql) + return ib +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/interpolate.go b/vendor/github.com/huandu/go-sqlbuilder/interpolate.go new file mode 100644 index 000000000..c6eb1db71 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/interpolate.go @@ -0,0 +1,827 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "database/sql/driver" + "fmt" + "reflect" + "strconv" + "time" + "unicode" + "unicode/utf8" + "unsafe" +) + +// mysqlInterpolate parses query and replace all "?" with encoded args. +// If there are more "?" than len(args), returns ErrMissingArgs. +// Otherwise, if there are less "?" than len(args), the redundant args are omitted. 
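+//
+// An illustrative sketch of Flavor.Interpolate (flavor.go), which delegates
+// to the interpolators below:
+//
+//	query, err := MySQL.Interpolate(
+//		"SELECT * FROM user WHERE id = ? AND name = ?",
+//		[]interface{}{42, "O'Hara"},
+//	)
+//
+//	// SELECT * FROM user WHERE id = 42 AND name = 'O\'Hara'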
+func mysqlInterpolate(query string, args ...interface{}) (string, error) { + return mysqlLikeInterpolate(MySQL, query, args...) +} + +func mysqlLikeInterpolate(flavor Flavor, query string, args ...interface{}) (string, error) { + // Roughly estimate the size to avoid useless memory allocation and copy. + buf := make([]byte, 0, len(query)+len(args)*20) + + var quote rune + var err error + cnt := 0 + max := len(args) + escaping := false + offset := 0 + target := query + r, sz := utf8.DecodeRuneInString(target) + + for ; sz != 0; r, sz = utf8.DecodeRuneInString(target) { + offset += sz + target = query[offset:] + + if escaping { + escaping = false + continue + } + + switch r { + case '?': + if quote != 0 { + continue + } + + if cnt >= max { + return "", ErrInterpolateMissingArgs + } + + buf = append(buf, query[:offset-sz]...) + buf, err = encodeValue(buf, args[cnt], flavor) + + if err != nil { + return "", err + } + + query = target + offset = 0 + cnt++ + + case '\'': + if quote == '\'' { + quote = 0 + continue + } + + if quote == 0 { + quote = '\'' + } + + case '"': + if quote == '"' { + quote = 0 + continue + } + + if quote == 0 { + quote = '"' + } + + case '`': + if quote == '`' { + quote = 0 + continue + } + + if quote == 0 { + quote = '`' + } + + case '\\': + if quote != 0 { + escaping = true + } + } + } + + buf = append(buf, query...) + return *(*string)(unsafe.Pointer(&buf)), nil +} + +// postgresqlInterpolate parses query and replace all "$*" with encoded args. +// If there are more "$*" than len(args), returns ErrMissingArgs. +// Otherwise, if there are less "$*" than len(args), the redundant args are omitted. +func postgresqlInterpolate(query string, args ...interface{}) (string, error) { + // Roughly estimate the size to avoid useless memory allocation and copy. + buf := make([]byte, 0, len(query)+len(args)*20) + + var quote rune + var dollarQuote string + var err error + var idx int64 + max := len(args) + escaping := false + offset := 0 + target := query + r, sz := utf8.DecodeRuneInString(target) + + for ; sz != 0; r, sz = utf8.DecodeRuneInString(target) { + offset += sz + target = query[offset:] + + if escaping { + escaping = false + continue + } + + switch r { + case '$': + if quote != 0 { + if quote != '$' { + continue + } + + // Try to find the end of dollar quote. + pos := offset + + for r, sz = utf8.DecodeRuneInString(target); sz != 0 && r != '$'; r, sz = utf8.DecodeRuneInString(target) { + pos += sz + target = query[pos:] + } + + if sz == 0 { + break + } + + if r == '$' { + dq := query[offset : pos+sz] + offset = pos + target = query[offset:] + + if dq == dollarQuote { + quote = 0 + dollarQuote = "" + offset += sz + target = query[offset:] + } + + continue + } + + continue + } + + oldSz := sz + pos := offset + r, sz = utf8.DecodeRuneInString(target) + + if '1' <= r && r <= '9' { + // A placeholder is found. + pos += sz + target = query[pos:] + + for r, sz = utf8.DecodeRuneInString(target); sz != 0 && '0' <= r && r <= '9'; r, sz = utf8.DecodeRuneInString(target) { + pos += sz + target = query[pos:] + } + + idx, err = strconv.ParseInt(query[offset:pos], 10, strconv.IntSize) + + if err != nil { + return "", err + } + + if int(idx) >= max+1 { + return "", ErrInterpolateMissingArgs + } + + buf = append(buf, query[:offset-oldSz]...) + buf, err = encodeValue(buf, args[idx-1], PostgreSQL) + + if err != nil { + return "", err + } + + query = target + offset = 0 + + if sz == 0 { + break + } + + continue + } + + // Try to find the beginning of dollar quote. 
+ for ; sz != 0 && r != '$' && unicode.IsLetter(r); r, sz = utf8.DecodeRuneInString(target) { + pos += sz + target = query[pos:] + } + + if sz == 0 { + break + } + + if !unicode.IsLetter(r) && r != '$' { + continue + } + + pos += sz + quote = '$' + dollarQuote = query[offset:pos] + offset = pos + target = query[offset:] + + case '\'': + if quote == '\'' { + // PostgreSQL uses two single quotes to represent one single quote. + r, sz = utf8.DecodeRuneInString(target) + + if r == '\'' { + offset += sz + target = query[offset:] + continue + } + + quote = 0 + continue + } + + if quote == 0 { + quote = '\'' + } + + case '"': + if quote == '"' { + quote = 0 + continue + } + + if quote == 0 { + quote = '"' + } + + case '\\': + if quote == '\'' || quote == '"' { + escaping = true + } + } + } + + buf = append(buf, query...) + return *(*string)(unsafe.Pointer(&buf)), nil +} + +// sqlserverInterpolate parses query and replace all "@p*" with encoded args. +// If there are more "@p*" than len(args), returns ErrMissingArgs. +// Otherwise, if there are less "@p*" than len(args), the redundant args are omitted. +func sqlserverInterpolate(query string, args ...interface{}) (string, error) { + // Roughly estimate the size to avoid useless memory allocation and copy. + buf := make([]byte, 0, len(query)+len(args)*20) + + var quote rune + var err error + var idx int64 + max := len(args) + escaping := false + offset := 0 + target := query + r, sz := utf8.DecodeRuneInString(target) + + for ; sz != 0; r, sz = utf8.DecodeRuneInString(target) { + offset += sz + target = query[offset:] + + if escaping { + escaping = false + continue + } + + switch r { + case '@': + if quote != 0 { + continue + } + + oldSz := sz + pos := offset + r, sz = utf8.DecodeRuneInString(target) + + // Only parameters starting with @p or @P are interpolated. + if r != 'p' && r != 'P' { + continue + } + + pos += sz + target = query[pos:] + r, sz = utf8.DecodeRuneInString(target) + + if '1' <= r && r <= '9' { + // A placeholder is found. + pos += sz + target = query[pos:] + + for r, sz = utf8.DecodeRuneInString(target); sz != 0 && '0' <= r && r <= '9'; r, sz = utf8.DecodeRuneInString(target) { + pos += sz + target = query[pos:] + } + + idx, err = strconv.ParseInt(query[offset+1:pos], 10, strconv.IntSize) + + if err != nil { + return "", err + } + + if int(idx) >= max+1 { + return "", ErrInterpolateMissingArgs + } + + buf = append(buf, query[:offset-oldSz]...) + buf, err = encodeValue(buf, args[idx-1], SQLServer) + + if err != nil { + return "", err + } + + query = target + offset = 0 + + if sz == 0 { + break + } + + continue + } + + case '\'': + if quote == '\'' { + quote = 0 + continue + } + + if quote == 0 { + quote = '\'' + } + + case '"': + if quote == '"' { + quote = 0 + continue + } + + if quote == 0 { + quote = '"' + } + + case '\\': + if quote != 0 { + escaping = true + } + } + } + + buf = append(buf, query...) + return *(*string)(unsafe.Pointer(&buf)), nil +} + +// mysqlInterpolate works the same as MySQL interpolating. +func sqliteInterpolate(query string, args ...interface{}) (string, error) { + return mysqlLikeInterpolate(SQLite, query, args...) +} + +// cqlInterpolate works the same as MySQL interpolating. +func cqlInterpolate(query string, args ...interface{}) (string, error) { + return mysqlLikeInterpolate(CQL, query, args...) +} + +func clickhouseInterpolate(query string, args ...interface{}) (string, error) { + return mysqlLikeInterpolate(ClickHouse, query, args...) 
+} + +func prestoInterpolate(query string, args ...interface{}) (string, error) { + return mysqlLikeInterpolate(Presto, query, args...) +} + +func informixInterpolate(query string, args ...interface{}) (string, error) { + return mysqlLikeInterpolate(Informix, query, args...) +} + +func dorisInterpolate(query string, args ...interface{}) (string, error) { + return mysqlLikeInterpolate(Doris, query, args...) +} + +// oraclelInterpolate parses query and replace all ":*" with encoded args. +// If there are more ":*" than len(args), returns ErrMissingArgs. +// Otherwise, if there are less ":*" than len(args), the redundant args are omitted. +func oracleInterpolate(query string, args ...interface{}) (string, error) { + // Roughly estimate the size to avoid useless memory allocation and copy. + buf := make([]byte, 0, len(query)+len(args)*20) + + var quote rune + var dollarQuote string + var err error + var idx int64 + max := len(args) + escaping := false + offset := 0 + target := query + r, sz := utf8.DecodeRuneInString(target) + + for ; sz != 0; r, sz = utf8.DecodeRuneInString(target) { + offset += sz + target = query[offset:] + + if escaping { + escaping = false + continue + } + + switch r { + case ':': + if quote != 0 { + if quote != ':' { + continue + } + + // Try to find the end of dollar quote. + pos := offset + + for r, sz = utf8.DecodeRuneInString(target); sz != 0 && r != ':'; r, sz = utf8.DecodeRuneInString(target) { + pos += sz + target = query[pos:] + } + + if sz == 0 { + break + } + + if r == ':' { + dq := query[offset : pos+sz] + offset = pos + target = query[offset:] + + if dq == dollarQuote { + quote = 0 + dollarQuote = "" + offset += sz + target = query[offset:] + } + + continue + } + + continue + } + + oldSz := sz + pos := offset + r, sz = utf8.DecodeRuneInString(target) + + if '1' <= r && r <= '9' { + // A placeholder is found. + pos += sz + target = query[pos:] + + for r, sz = utf8.DecodeRuneInString(target); sz != 0 && '0' <= r && r <= '9'; r, sz = utf8.DecodeRuneInString(target) { + pos += sz + target = query[pos:] + } + + idx, err = strconv.ParseInt(query[offset:pos], 10, strconv.IntSize) + + if err != nil { + return "", err + } + + if int(idx) >= max+1 { + return "", ErrInterpolateMissingArgs + } + + buf = append(buf, query[:offset-oldSz]...) + buf, err = encodeValue(buf, args[idx-1], Oracle) + + if err != nil { + return "", err + } + + query = target + offset = 0 + + if sz == 0 { + break + } + + continue + } + + // Try to find the beginning of dollar quote. + for ; sz != 0 && r != ':' && unicode.IsLetter(r); r, sz = utf8.DecodeRuneInString(target) { + pos += sz + target = query[pos:] + } + + if sz == 0 { + break + } + + if !unicode.IsLetter(r) && r != ':' { + continue + } + + pos += sz + quote = ':' + dollarQuote = query[offset:pos] + offset = pos + target = query[offset:] + + case '\'': + if quote == '\'' { + // PostgreSQL uses two single quotes to represent one single quote. + r, sz = utf8.DecodeRuneInString(target) + + if r == '\'' { + offset += sz + target = query[offset:] + continue + } + + quote = 0 + continue + } + + if quote == 0 { + quote = '\'' + } + + case '"': + if quote == '"' { + quote = 0 + continue + } + + if quote == 0 { + quote = '"' + } + + case '\\': + if quote == '\'' || quote == '"' { + escaping = true + } + } + } + + buf = append(buf, query...) + return *(*string)(unsafe.Pointer(&buf)), nil +} + +func encodeValue(buf []byte, arg interface{}, flavor Flavor) ([]byte, error) { + switch v := arg.(type) { + case nil: + buf = append(buf, "NULL"...) 
+ + case driver.Valuer: + if val, err := v.Value(); err != nil { + return nil, err + } else { + return encodeValue(buf, val, flavor) + } + + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) + break + } + + // In SQL standard, the precision of fractional seconds in time literal is up to 6 digits. + // Round up v. + v = v.Add(500 * time.Nanosecond) + + switch flavor { + case MySQL, ClickHouse, Informix, Doris: + buf = append(buf, v.Format("'2006-01-02 15:04:05.999999'")...) + + case PostgreSQL: + buf = append(buf, v.Format("'2006-01-02 15:04:05.999999 MST'")...) + + case SQLite: + buf = append(buf, v.Format("'2006-01-02 15:04:05.000'")...) + + case SQLServer: + buf = append(buf, v.Format("'2006-01-02 15:04:05.999999 Z07:00'")...) + + case CQL: + buf = append(buf, v.Format("'2006-01-02 15:04:05.999999Z0700'")...) + + case Presto: + buf = append(buf, v.Format("'2006-01-02 15:04:05.000'")...) + + case Oracle: + buf = append(buf, "to_timestamp('"...) + buf = append(buf, v.Format("2006-01-02 15:04:05.999999")...) + buf = append(buf, "', 'YYYY-MM-DD HH24:MI:SS.FF')"...) + } + + default: + primative := reflect.ValueOf(arg) + + // Handle typed nil values (e.g. (*string)(nil), (*time.Time)(nil)) + // This check must come before fmt.Stringer check since nil pointers may implement interfaces + if !primative.IsValid() || (primative.Kind() == reflect.Ptr && primative.IsNil()) { + buf = append(buf, "NULL"...) + return buf, nil + } + + // Check for fmt.Stringer after nil pointer check + if stringer, ok := arg.(fmt.Stringer); ok { + buf = quoteStringValue(buf, stringer.String(), flavor) + return buf, nil + } + + switch k := primative.Kind(); k { + case reflect.Bool: + switch flavor { + case Oracle: + if primative.Bool() { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + default: + if primative.Bool() { + buf = append(buf, "TRUE"...) + } else { + buf = append(buf, "FALSE"...) + } + } + + case reflect.Int: + buf = strconv.AppendInt(buf, primative.Int(), 10) + + case reflect.Int8: + buf = strconv.AppendInt(buf, primative.Int(), 10) + + case reflect.Int16: + buf = strconv.AppendInt(buf, primative.Int(), 10) + + case reflect.Int32: + buf = strconv.AppendInt(buf, primative.Int(), 10) + + case reflect.Int64: + buf = strconv.AppendInt(buf, primative.Int(), 10) + + case reflect.Uint: + buf = strconv.AppendUint(buf, primative.Uint(), 10) + + case reflect.Uint8: + buf = strconv.AppendUint(buf, primative.Uint(), 10) + + case reflect.Uint16: + buf = strconv.AppendUint(buf, primative.Uint(), 10) + + case reflect.Uint32: + buf = strconv.AppendUint(buf, primative.Uint(), 10) + + case reflect.Uint64: + buf = strconv.AppendUint(buf, primative.Uint(), 10) + + case reflect.Float32: + buf = strconv.AppendFloat(buf, primative.Float(), 'g', -1, 32) + + case reflect.Float64: + buf = strconv.AppendFloat(buf, primative.Float(), 'g', -1, 64) + + case reflect.String: + buf = quoteStringValue(buf, primative.String(), flavor) + + case reflect.Slice, reflect.Array: + if k == reflect.Slice && primative.IsNil() { + buf = append(buf, "NULL"...) + break + } + + if elem := primative.Type().Elem(); elem.Kind() != reflect.Uint8 { + return nil, ErrInterpolateUnsupportedArgs + } + + var data []byte + + // Bytes() will panic if primative is an array and cannot be addressed. + // Copy all bytes to data as a fallback. 
+ if k == reflect.Array && !primative.CanAddr() { + l := primative.Len() + data = make([]byte, l) + + for i := 0; i < l; i++ { + data[i] = byte(primative.Index(i).Uint()) + } + } else { + data = primative.Bytes() + } + + switch flavor { + case MySQL: + buf = append(buf, "_binary"...) + buf = quoteStringValue(buf, *(*string)(unsafe.Pointer(&data)), flavor) + + case PostgreSQL: + buf = append(buf, "E'\\\\x"...) + buf = appendHex(buf, data) + buf = append(buf, "'::bytea"...) + + case SQLite: + buf = append(buf, "X'"...) + buf = appendHex(buf, data) + buf = append(buf, '\'') + + case SQLServer, CQL: + buf = append(buf, "0x"...) + buf = appendHex(buf, data) + + case ClickHouse: + buf = append(buf, "unhex('"...) + buf = appendHex(buf, data) + buf = append(buf, "')"...) + + case Presto: + buf = append(buf, "from_hex('"...) + buf = appendHex(buf, data) + buf = append(buf, "')"...) + + case Oracle: + buf = append(buf, "hextoraw('"...) + buf = appendHex(buf, data) + buf = append(buf, "')"...) + + default: + return nil, ErrInterpolateUnsupportedArgs + } + + default: + return nil, ErrInterpolateUnsupportedArgs + } + } + + return buf, nil +} + +var hexDigits = [16]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'} + +func appendHex(buf, v []byte) []byte { + for _, b := range v { + buf = append(buf, hexDigits[(b>>4)&0xF], hexDigits[b&0xF]) + } + + return buf +} + +func quoteStringValue(buf []byte, s string, flavor Flavor) []byte { + switch flavor { + case PostgreSQL: + buf = append(buf, 'E') + case SQLServer: + buf = append(buf, 'N') + } + + buf = append(buf, '\'') + r, sz := utf8.DecodeRuneInString(s) + + for ; sz != 0; r, sz = utf8.DecodeRuneInString(s) { + switch r { + case '\x00': + buf = append(buf, "\\0"...) + + case '\b': + buf = append(buf, "\\b"...) + + case '\n': + buf = append(buf, "\\n"...) + + case '\r': + buf = append(buf, "\\r"...) + + case '\t': + buf = append(buf, "\\t"...) + + case '\x1a': + buf = append(buf, "\\Z"...) + + case '\'': + if flavor == CQL { + buf = append(buf, "''"...) + } else { + buf = append(buf, "\\'"...) + } + + case '"': + buf = append(buf, "\\\""...) + + case '\\': + buf = append(buf, "\\\\"...) + + default: + buf = append(buf, s[:sz]...) + } + + s = s[sz:] + } + + buf = append(buf, '\'') + return buf +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/modifiers.go b/vendor/github.com/huandu/go-sqlbuilder/modifiers.go new file mode 100644 index 000000000..9de620afa --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/modifiers.go @@ -0,0 +1,125 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "reflect" + "strings" +) + +// Escape replaces `$` with `$$` in ident. +func Escape(ident string) string { + return strings.Replace(ident, "$", "$$", -1) +} + +// EscapeAll replaces `$` with `$$` in all strings of ident. +func EscapeAll(ident ...string) []string { + escaped := make([]string, 0, len(ident)) + + for _, i := range ident { + escaped = append(escaped, Escape(i)) + } + + return escaped +} + +// Flatten recursively extracts values in slices and returns +// a flattened []interface{} with all values. +// If slices is not a slice, return `[]interface{}{slices}`. 
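+//
+// For example:
+//
+//	Flatten([]interface{}{1, []string{"a", "b"}, [][]int{{2}, {3}}})
+//	// []interface{}{1, "a", "b", 2, 3}
+//
+// A flattened slice is typically fed to IN via List; an illustrative sketch,
+// assuming the Cond.In helper from cond.go:
+//
+//	sb := Select("id").From("user")
+//	sb.Where(sb.In("status", List([]int{1, 2, 3})))
+//	// WHERE status IN (?, ?, ?) with args [1 2 3]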
+func Flatten(slices interface{}) (flattened []interface{}) { + v := reflect.ValueOf(slices) + slices, flattened = flatten(v) + + if slices != nil { + return []interface{}{slices} + } + + return flattened +} + +func flatten(v reflect.Value) (elem interface{}, flattened []interface{}) { + k := v.Kind() + + for k == reflect.Interface { + v = v.Elem() + k = v.Kind() + } + + if k != reflect.Slice && k != reflect.Array { + if !v.IsValid() || !v.CanInterface() { + return + } + + elem = v.Interface() + return elem, nil + } + + for i, l := 0, v.Len(); i < l; i++ { + e, f := flatten(v.Index(i)) + + if e == nil { + flattened = append(flattened, f...) + } else { + flattened = append(flattened, e) + } + } + + return +} + +type rawArgs struct { + expr string +} + +// Raw marks the expr as a raw value which will not be added to args. +func Raw(expr string) interface{} { + return rawArgs{expr} +} + +type listArgs struct { + args []interface{} + isTuple bool +} + +// List marks arg as a list of data. +// If arg is `[]int{1, 2, 3}`, it will be compiled to `?, ?, ?` with args `[1 2 3]`. +func List(arg interface{}) interface{} { + return listArgs{ + args: Flatten(arg), + } +} + +// Tuple wraps values into a tuple and can be used as a single value. +func Tuple(values ...interface{}) interface{} { + return listArgs{ + args: values, + isTuple: true, + } +} + +// TupleNames joins names with tuple format. +// The names is not escaped. Use `EscapeAll` to escape them if necessary. +func TupleNames(names ...string) string { + buf := newStringBuilder() + buf.WriteRune('(') + buf.WriteStrings(names, ", ") + buf.WriteRune(')') + + return buf.String() +} + +type namedArgs struct { + name string + arg interface{} +} + +// Named creates a named argument. +// Unlike `sql.Named`, this named argument works only with `Build` or `BuildNamed` for convenience +// and will be replaced to a `?` after `Compile`. +func Named(name string, arg interface{}) interface{} { + return namedArgs{ + name: name, + arg: arg, + } +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/select.go b/vendor/github.com/huandu/go-sqlbuilder/select.go new file mode 100644 index 000000000..da7f98e05 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/select.go @@ -0,0 +1,620 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "fmt" + "reflect" + "strings" + + "github.com/huandu/go-clone" +) + +const ( + selectMarkerInit injectionMarker = iota + selectMarkerAfterWith + selectMarkerAfterSelect + selectMarkerAfterFrom + selectMarkerAfterJoin + selectMarkerAfterWhere + selectMarkerAfterGroupBy + selectMarkerAfterOrderBy + selectMarkerAfterLimit + selectMarkerAfterFor +) + +// JoinOption is the option in JOIN. +type JoinOption string + +// Join options. +const ( + FullJoin JoinOption = "FULL" + FullOuterJoin JoinOption = "FULL OUTER" + InnerJoin JoinOption = "INNER" + LeftJoin JoinOption = "LEFT" + LeftOuterJoin JoinOption = "LEFT OUTER" + RightJoin JoinOption = "RIGHT" + RightOuterJoin JoinOption = "RIGHT OUTER" +) + +// NewSelectBuilder creates a new SELECT builder. 
+func NewSelectBuilder() *SelectBuilder { + return DefaultFlavor.NewSelectBuilder() +} + +func newSelectBuilder() *SelectBuilder { + args := &Args{} + proxy := &whereClauseProxy{} + return &SelectBuilder{ + whereClauseProxy: proxy, + whereClauseExpr: args.Add(proxy), + + Cond: Cond{ + Args: args, + }, + args: args, + injection: newInjection(), + } +} + +// Clone returns a deep copy of SelectBuilder. +// It's useful when you want to create a base builder and clone it to build similar queries. +func (sb *SelectBuilder) Clone() *SelectBuilder { + return clone.Clone(sb).(*SelectBuilder) +} + +func init() { + t := reflect.TypeOf(SelectBuilder{}) + clone.SetCustomFunc(t, func(allocator *clone.Allocator, old, new reflect.Value) { + cloned := allocator.CloneSlowly(old) + new.Set(cloned) + + sb := cloned.Addr().Interface().(*SelectBuilder) + sb.args.Replace(sb.whereClauseExpr, sb.whereClauseProxy) + sb.args.Replace(sb.cteBuilderVar, sb.cteBuilder) + }) +} + +// SelectBuilder is a builder to build SELECT. +type SelectBuilder struct { + *WhereClause + Cond + + whereClauseProxy *whereClauseProxy + whereClauseExpr string + + cteBuilderVar string + cteBuilder *CTEBuilder + + distinct bool + tables []string + selectCols []string + joinOptions []JoinOption + joinTables []string + joinExprs [][]string + havingExprs []string + groupByCols []string + orderByCols []string + order string + limitVar string + offsetVar string + forWhat string + + args *Args + + injection *injection + marker injectionMarker +} + +var _ Builder = new(SelectBuilder) + +// Select sets columns in SELECT. +func Select(col ...string) *SelectBuilder { + return DefaultFlavor.NewSelectBuilder().Select(col...) +} + +// TableNames returns all table names in this SELECT statement. +func (sb *SelectBuilder) TableNames() []string { + var additionalTableNames []string + + if sb.cteBuilder != nil { + additionalTableNames = sb.cteBuilder.tableNamesForFrom() + } + + var tableNames []string + + if len(sb.tables) > 0 && len(additionalTableNames) > 0 { + tableNames = make([]string, len(sb.tables)+len(additionalTableNames)) + copy(tableNames, sb.tables) + copy(tableNames[len(sb.tables):], additionalTableNames) + } else if len(sb.tables) > 0 { + tableNames = sb.tables + } else if len(additionalTableNames) > 0 { + tableNames = additionalTableNames + } + + return tableNames +} + +// With sets WITH clause (the Common Table Expression) before SELECT. +func (sb *SelectBuilder) With(builder *CTEBuilder) *SelectBuilder { + sb.marker = selectMarkerAfterWith + sb.cteBuilderVar = sb.Var(builder) + sb.cteBuilder = builder + return sb +} + +// Select sets columns in SELECT. +func (sb *SelectBuilder) Select(col ...string) *SelectBuilder { + sb.selectCols = col + sb.marker = selectMarkerAfterSelect + return sb +} + +// SelectMore adds more columns in SELECT. +func (sb *SelectBuilder) SelectMore(col ...string) *SelectBuilder { + sb.selectCols = append(sb.selectCols, col...) + sb.marker = selectMarkerAfterSelect + return sb +} + +// Distinct marks this SELECT as DISTINCT. +func (sb *SelectBuilder) Distinct() *SelectBuilder { + sb.distinct = true + sb.marker = selectMarkerAfterSelect + return sb +} + +// From sets table names in SELECT. +func (sb *SelectBuilder) From(table ...string) *SelectBuilder { + sb.tables = table + sb.marker = selectMarkerAfterFrom + return sb +} + +// Join sets expressions of JOIN in SELECT. +// +// It builds a JOIN expression like +// +// JOIN table ON onExpr[0] AND onExpr[1] ... 
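+//
+// An illustrative sketch of a complete query (GreaterThan comes from Cond
+// in cond.go):
+//
+//	sb := NewSelectBuilder()
+//	sb.Select("u.id", "o.total").From("user u").
+//		JoinWithOption(LeftJoin, "orders o", "o.user_id = u.id").
+//		Where(sb.GreaterThan("o.total", 100)).
+//		OrderBy("o.total").Desc().Limit(10)
+//	sql, args := sb.Build()
+//
+//	// SELECT u.id, o.total FROM user u LEFT JOIN orders o ON o.user_id = u.id
+//	// WHERE o.total > ? ORDER BY o.total DESC LIMIT ?
+//	// args: [100 10]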
+func (sb *SelectBuilder) Join(table string, onExpr ...string) *SelectBuilder { + sb.marker = selectMarkerAfterJoin + return sb.JoinWithOption("", table, onExpr...) +} + +// JoinWithOption sets expressions of JOIN with an option. +// +// It builds a JOIN expression like +// +// option JOIN table ON onExpr[0] AND onExpr[1] ... +// +// Here is a list of supported options. +// - FullJoin: FULL JOIN +// - FullOuterJoin: FULL OUTER JOIN +// - InnerJoin: INNER JOIN +// - LeftJoin: LEFT JOIN +// - LeftOuterJoin: LEFT OUTER JOIN +// - RightJoin: RIGHT JOIN +// - RightOuterJoin: RIGHT OUTER JOIN +func (sb *SelectBuilder) JoinWithOption(option JoinOption, table string, onExpr ...string) *SelectBuilder { + sb.joinOptions = append(sb.joinOptions, option) + sb.joinTables = append(sb.joinTables, table) + sb.joinExprs = append(sb.joinExprs, onExpr) + sb.marker = selectMarkerAfterJoin + return sb +} + +// Where sets expressions of WHERE in SELECT. +func (sb *SelectBuilder) Where(andExpr ...string) *SelectBuilder { + if len(andExpr) == 0 || estimateStringsBytes(andExpr) == 0 { + return sb + } + + if sb.WhereClause == nil { + sb.WhereClause = NewWhereClause() + } + + sb.WhereClause.AddWhereExpr(sb.args, andExpr...) + sb.marker = selectMarkerAfterWhere + return sb +} + +// AddWhereClause adds all clauses in the whereClause to SELECT. +func (sb *SelectBuilder) AddWhereClause(whereClause *WhereClause) *SelectBuilder { + if sb.WhereClause == nil { + sb.WhereClause = NewWhereClause() + } + + sb.WhereClause.AddWhereClause(whereClause) + return sb +} + +// Having sets expressions of HAVING in SELECT. +func (sb *SelectBuilder) Having(andExpr ...string) *SelectBuilder { + sb.havingExprs = append(sb.havingExprs, andExpr...) + sb.marker = selectMarkerAfterGroupBy + return sb +} + +// GroupBy sets columns of GROUP BY in SELECT. +func (sb *SelectBuilder) GroupBy(col ...string) *SelectBuilder { + sb.groupByCols = append(sb.groupByCols, col...) + sb.marker = selectMarkerAfterGroupBy + return sb +} + +// OrderBy sets columns of ORDER BY in SELECT. +func (sb *SelectBuilder) OrderBy(col ...string) *SelectBuilder { + sb.orderByCols = append(sb.orderByCols, col...) + sb.marker = selectMarkerAfterOrderBy + return sb +} + +// Asc sets order of ORDER BY to ASC. +func (sb *SelectBuilder) Asc() *SelectBuilder { + sb.order = "ASC" + sb.marker = selectMarkerAfterOrderBy + return sb +} + +// Desc sets order of ORDER BY to DESC. +func (sb *SelectBuilder) Desc() *SelectBuilder { + sb.order = "DESC" + sb.marker = selectMarkerAfterOrderBy + return sb +} + +// Limit sets the LIMIT in SELECT. +func (sb *SelectBuilder) Limit(limit int) *SelectBuilder { + if limit < 0 { + sb.limitVar = "" + return sb + } + + sb.limitVar = sb.Var(limit) + sb.marker = selectMarkerAfterLimit + return sb +} + +// Offset sets the LIMIT offset in SELECT. +func (sb *SelectBuilder) Offset(offset int) *SelectBuilder { + if offset < 0 { + sb.offsetVar = "" + return sb + } + + sb.offsetVar = sb.Var(offset) + sb.marker = selectMarkerAfterLimit + return sb +} + +// ForUpdate adds FOR UPDATE at the end of SELECT statement. +func (sb *SelectBuilder) ForUpdate() *SelectBuilder { + sb.forWhat = "UPDATE" + sb.marker = selectMarkerAfterFor + return sb +} + +// ForShare adds FOR SHARE at the end of SELECT statement. +func (sb *SelectBuilder) ForShare() *SelectBuilder { + sb.forWhat = "SHARE" + sb.marker = selectMarkerAfterFor + return sb +} + +// As returns an AS expression. 
+func (sb *SelectBuilder) As(name, alias string) string { + return fmt.Sprintf("%s AS %s", name, alias) +} + +// BuilderAs returns an AS expression wrapping a complex SQL. +// According to SQL syntax, SQL built by builder is surrounded by parens. +func (sb *SelectBuilder) BuilderAs(builder Builder, alias string) string { + return fmt.Sprintf("(%s) AS %s", sb.Var(builder), alias) +} + +// LateralAs returns a LATERAL derived table expression wrapping a complex SQL. +func (sb *SelectBuilder) LateralAs(builder Builder, alias string) string { + return fmt.Sprintf("LATERAL (%s) AS %s", sb.Var(builder), alias) +} + +// NumCol returns the number of columns to select. +func (sb *SelectBuilder) NumCol() int { + return len(sb.selectCols) +} + +// String returns the compiled SELECT string. +func (sb *SelectBuilder) String() string { + s, _ := sb.Build() + return s +} + +// Build returns compiled SELECT string and args. +// They can be used in `DB#Query` of package `database/sql` directly. +func (sb *SelectBuilder) Build() (sql string, args []interface{}) { + return sb.BuildWithFlavor(sb.args.Flavor) +} + +// BuildWithFlavor returns compiled SELECT string and args with flavor and initial args. +// They can be used in `DB#Query` of package `database/sql` directly. +func (sb *SelectBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + buf := newStringBuilder() + sb.injection.WriteTo(buf, selectMarkerInit) + + oraclePage := flavor == Oracle && (len(sb.limitVar) > 0 || len(sb.offsetVar) > 0) + + if sb.cteBuilderVar != "" { + buf.WriteLeadingString(sb.cteBuilderVar) + sb.injection.WriteTo(buf, selectMarkerAfterWith) + } + + if len(sb.selectCols) > 0 { + buf.WriteLeadingString("SELECT ") + + if sb.distinct { + buf.WriteString("DISTINCT ") + } + + if oraclePage { + var selectCols = make([]string, 0, len(sb.selectCols)) + for i := range sb.selectCols { + cols := strings.SplitN(sb.selectCols[i], ".", 2) + + if len(cols) == 1 { + selectCols = append(selectCols, cols[0]) + } else { + selectCols = append(selectCols, cols[1]) + } + } + buf.WriteStrings(selectCols, ", ") + } else { + buf.WriteStrings(sb.selectCols, ", ") + } + } + + sb.injection.WriteTo(buf, selectMarkerAfterSelect) + + if oraclePage { + if len(sb.selectCols) > 0 { + buf.WriteLeadingString("FROM (SELECT ") + + if sb.distinct { + buf.WriteString("DISTINCT ") + } + + var selectCols = make([]string, 0, len(sb.selectCols)+1) + selectCols = append(selectCols, "ROWNUM r") + + for i := range sb.selectCols { + cols := strings.SplitN(sb.selectCols[i], ".", 2) + if len(cols) == 1 { + selectCols = append(selectCols, cols[0]) + } else { + selectCols = append(selectCols, cols[1]) + } + } + + buf.WriteStrings(selectCols, ", ") + buf.WriteLeadingString("FROM (SELECT ") + buf.WriteStrings(sb.selectCols, ", ") + } + } + + tableNames := sb.TableNames() + + if len(tableNames) > 0 { + buf.WriteLeadingString("FROM ") + buf.WriteStrings(tableNames, ", ") + } + + sb.injection.WriteTo(buf, selectMarkerAfterFrom) + + for i := range sb.joinTables { + if option := sb.joinOptions[i]; option != "" { + buf.WriteLeadingString(string(option)) + } + + buf.WriteLeadingString("JOIN ") + buf.WriteString(sb.joinTables[i]) + + if exprs := filterEmptyStrings(sb.joinExprs[i]); len(exprs) > 0 { + buf.WriteString(" ON ") + buf.WriteStrings(exprs, " AND ") + } + } + + if len(sb.joinTables) > 0 { + sb.injection.WriteTo(buf, selectMarkerAfterJoin) + } + + if sb.WhereClause != nil { + sb.whereClauseProxy.WhereClause = sb.WhereClause + defer 
func() { + sb.whereClauseProxy.WhereClause = nil + }() + + buf.WriteLeadingString(sb.whereClauseExpr) + sb.injection.WriteTo(buf, selectMarkerAfterWhere) + } + + if len(sb.groupByCols) > 0 { + buf.WriteLeadingString("GROUP BY ") + buf.WriteStrings(sb.groupByCols, ", ") + + if havingExprs := filterEmptyStrings(sb.havingExprs); len(havingExprs) > 0 { + buf.WriteString(" HAVING ") + buf.WriteStrings(havingExprs, " AND ") + } + + sb.injection.WriteTo(buf, selectMarkerAfterGroupBy) + } + + if len(sb.orderByCols) > 0 { + buf.WriteLeadingString("ORDER BY ") + buf.WriteStrings(sb.orderByCols, ", ") + + if sb.order != "" { + buf.WriteRune(' ') + buf.WriteString(sb.order) + } + + sb.injection.WriteTo(buf, selectMarkerAfterOrderBy) + } + + switch flavor { + case MySQL, SQLite, ClickHouse: + if len(sb.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(sb.limitVar) + + if len(sb.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(sb.offsetVar) + } + } + + case CQL: + if len(sb.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(sb.limitVar) + } + + case PostgreSQL: + if len(sb.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(sb.limitVar) + } + + if len(sb.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(sb.offsetVar) + } + + case Presto: + // There might be a hidden constraint in Presto requiring offset to be set before limit. + // The select statement documentation (https://prestodb.io/docs/current/sql/select.html) + // puts offset before limit, and Trino, which is based on Presto, seems + // to require this specific order. + if len(sb.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(sb.offsetVar) + } + + if len(sb.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(sb.limitVar) + } + + case SQLServer: + // If ORDER BY is not set, sort column #1 by default. + // It's required to make OFFSET...FETCH work. + if len(sb.orderByCols) == 0 && (len(sb.limitVar) > 0 || len(sb.offsetVar) > 0) { + buf.WriteLeadingString("ORDER BY 1") + } + + if len(sb.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(sb.offsetVar) + buf.WriteString(" ROWS") + } + + if len(sb.limitVar) > 0 { + if len(sb.offsetVar) == 0 { + buf.WriteLeadingString("OFFSET 0 ROWS") + } + + buf.WriteLeadingString("FETCH NEXT ") + buf.WriteString(sb.limitVar) + buf.WriteString(" ROWS ONLY") + } + + case Oracle: + if oraclePage { + buf.WriteString(") ") + + if len(sb.tables) > 0 { + buf.WriteStrings(sb.tables, ", ") + } + + buf.WriteString(") WHERE ") + + if len(sb.limitVar) > 0 { + buf.WriteString("r BETWEEN ") + + if len(sb.offsetVar) > 0 { + buf.WriteString(sb.offsetVar) + buf.WriteString(" + 1 AND ") + buf.WriteString(sb.limitVar) + buf.WriteString(" + ") + buf.WriteString(sb.offsetVar) + } else { + buf.WriteString("1 AND ") + buf.WriteString(sb.limitVar) + buf.WriteString(" + 1") + } + } else { + // As oraclePage is true, sb.offsetVar must not be empty. + buf.WriteString("r >= ") + buf.WriteString(sb.offsetVar) + buf.WriteString(" + 1") + } + } + + case Informix: + // [SKIP N] FIRST M + // M must be greater than 0 + if len(sb.limitVar) > 0 { + if len(sb.offsetVar) > 0 { + buf.WriteLeadingString("SKIP ") + buf.WriteString(sb.offsetVar) + } + + buf.WriteLeadingString("FIRST ") + buf.WriteString(sb.limitVar) + } + + case Doris: + // #192: Doris doesn't support ? in OFFSET and LIMIT. 
+ if len(sb.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(fmt.Sprint(sb.args.Value(sb.limitVar))) + + if len(sb.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(fmt.Sprint(sb.args.Value(sb.offsetVar))) + } + } + } + + if len(sb.limitVar) > 0 { + sb.injection.WriteTo(buf, selectMarkerAfterLimit) + } + + if sb.forWhat != "" { + buf.WriteLeadingString("FOR ") + buf.WriteString(sb.forWhat) + + sb.injection.WriteTo(buf, selectMarkerAfterFor) + } + + return sb.args.CompileWithFlavor(buf.String(), flavor, initialArg...) +} + +// SetFlavor sets the flavor of compiled sql. +func (sb *SelectBuilder) SetFlavor(flavor Flavor) (old Flavor) { + old = sb.args.Flavor + sb.args.Flavor = flavor + return +} + +// Flavor returns flavor of builder +func (sb *SelectBuilder) Flavor() Flavor { + return sb.args.Flavor +} + +// SQL adds an arbitrary sql to current position. +func (sb *SelectBuilder) SQL(sql string) *SelectBuilder { + sb.injection.SQL(sb.marker, sql) + return sb +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/stringbuilder.go b/vendor/github.com/huandu/go-sqlbuilder/stringbuilder.go new file mode 100644 index 000000000..6fd37dfeb --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/stringbuilder.go @@ -0,0 +1,103 @@ +// Copyright 2023 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "io" + "strings" +) + +type stringBuilder struct { + builder *strings.Builder +} + +var _ io.Writer = new(stringBuilder) + +func newStringBuilder() *stringBuilder { + return &stringBuilder{ + builder: &strings.Builder{}, + } +} + +// WriteLeadingString writes s to internal buffer. +// If it's not the first time to write the string, a blank (" ") will be written before s. +func (sb *stringBuilder) WriteLeadingString(s string) { + if sb.builder.Len() > 0 { + sb.builder.WriteString(" ") + } + + sb.builder.WriteString(s) +} + +func (sb *stringBuilder) WriteString(s string) { + sb.builder.WriteString(s) +} + +func (sb *stringBuilder) WriteStrings(ss []string, sep string) { + if len(ss) == 0 { + return + } + + firstAdded := false + if len(ss[0]) != 0 { + sb.WriteString(ss[0]) + firstAdded = true + } + + for _, s := range ss[1:] { + if len(s) != 0 { + if firstAdded { + sb.WriteString(sep) + } + sb.WriteString(s) + firstAdded = true + } + } +} + +func (sb *stringBuilder) WriteRune(r rune) { + sb.builder.WriteRune(r) +} + +func (sb *stringBuilder) Write(data []byte) (int, error) { + return sb.builder.Write(data) +} + +func (sb *stringBuilder) String() string { + return sb.builder.String() +} + +func (sb *stringBuilder) Reset() { + sb.builder.Reset() +} + +func (sb *stringBuilder) Grow(n int) { + sb.builder.Grow(n) +} + +// filterEmptyStrings removes empty strings from ss. +// As ss rarely contains empty strings, filterEmptyStrings tries to avoid allocation if possible. 
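The flavor switch above is easiest to see end to end by compiling one builder for several flavors. A sketch; the placeholder styles and exact output strings below are indicative, not authoritative:

    sb := sqlbuilder.Select("id").From("t").Limit(10).Offset(5)

    for _, flavor := range []sqlbuilder.Flavor{
        sqlbuilder.MySQL, sqlbuilder.PostgreSQL, sqlbuilder.SQLServer,
    } {
        query, args := sb.BuildWithFlavor(flavor)
        fmt.Println(query, args)
    }
    // MySQL:      SELECT id FROM t LIMIT ? OFFSET ?    [10 5]
    // PostgreSQL: SELECT id FROM t LIMIT $1 OFFSET $2  [10 5]
    // SQLServer:  SELECT id FROM t ORDER BY 1 OFFSET @p1 ROWS
    //             FETCH NEXT @p2 ROWS ONLY             [5 10]
    // Note args follow placeholder order, so SQLServer gets the
    // offset before the limit.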
+func filterEmptyStrings(ss []string) []string { + emptyStrings := 0 + + for _, s := range ss { + if len(s) == 0 { + emptyStrings++ + } + } + + if emptyStrings == 0 { + return ss + } + + filtered := make([]string, 0, len(ss)-emptyStrings) + + for _, s := range ss { + if len(s) != 0 { + filtered = append(filtered, s) + } + } + + return filtered +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/struct.go b/vendor/github.com/huandu/go-sqlbuilder/struct.go new file mode 100644 index 000000000..a23b9c3e2 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/struct.go @@ -0,0 +1,815 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "database/sql/driver" + "math" + "reflect" + "regexp" + "sort" + "strings" +) + +var ( + // DBTag is the struct tag to describe the name for a field in struct. + DBTag = "db" + + // FieldTag is the struct tag to describe the tag name for a field in struct. + // Use "," to separate different tags. + FieldTag = "fieldtag" + + // FieldOpt is the options for a struct field. + // As db column can contain "," in theory, field options should be provided in a separated tag. + FieldOpt = "fieldopt" + + // FieldAs is the column alias (AS) for a struct field. + FieldAs = "fieldas" +) + +const ( + fieldOptWithQuote = "withquote" + fieldOptOmitEmpty = "omitempty" + + optName = "optName" + optParams = "optParams" +) + +var optRegex = regexp.MustCompile(`(?P<` + optName + `>\w+)(\((?P<` + optParams + `>.*)\))?`) + +var typeOfSQLDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// Struct represents a struct type. +// +// All methods in Struct are thread-safe. +// We can define a global variable to hold a Struct and use it in any goroutine. +type Struct struct { + Flavor Flavor + + structType reflect.Type + structFieldsParser structFieldsParser + withTags []string + withoutTags []string +} + +var emptyStruct Struct + +// NewStruct analyzes type information in structValue +// and creates a new Struct with all structValue fields. +// If structValue is not a struct, NewStruct returns a dummy Struct. +func NewStruct(structValue interface{}) *Struct { + t := reflect.TypeOf(structValue) + t = dereferencedType(t) + + if t.Kind() != reflect.Struct { + return &emptyStruct + } + + return &Struct{ + Flavor: DefaultFlavor, + structType: t, + structFieldsParser: makeDefaultFieldsParser(t), + } +} + +// For sets the default flavor of s and returns a shadow copy of s. +// The original s.Flavor is not changed. +func (s *Struct) For(flavor Flavor) *Struct { + c := *s + c.Flavor = flavor + return &c +} + +// WithFieldMapper returns a new Struct based on s with custom field mapper. +// The original s is not changed. +func (s *Struct) WithFieldMapper(mapper FieldMapperFunc) *Struct { + if s.structType == nil { + return &emptyStruct + } + + c := *s + c.structFieldsParser = makeCustomFieldsParser(s.structType, mapper) + return &c +} + +// WithTag sets included tag(s) for all builder methods. +// For instance, calling s.WithTag("tag").SelectFrom("t") is to select all fields tagged with "tag" from table "t". +// +// If multiple tags are provided, fields tagged with any of them are included. +// That is, s.WithTag("tag1", "tag2").SelectFrom("t") is to select all fields tagged with "tag1" or "tag2" from table "t". 
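A sketch of how WithTag and WithoutTag combine with a tagged struct. Illustrative only; the User type and tag names are invented:

    type User struct {
        ID     int64  `db:"id" fieldtag:"pk"`
        Name   string `db:"name"`
        Secret string `db:"secret" fieldtag:"private"`
    }

    userStruct := sqlbuilder.NewStruct(new(User))

    // All fields except those tagged "private".
    query, _ := userStruct.WithoutTag("private").SelectFrom("user").Build()
    fmt.Println(query) // SELECT user.id, user.name FROM user

    // Only fields tagged "pk".
    query, _ = userStruct.WithTag("pk").SelectFrom("user").Build()
    fmt.Println(query) // SELECT user.id FROM user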
+func (s *Struct) WithTag(tags ...string) *Struct { + if len(tags) == 0 { + return s + } + + c := *s + c.mergeWithTags(tags) + return &c +} + +func (s *Struct) mergeWithTags(with []string) { + newTags := make([]int, 0, len(with)) + withTags := s.withTags + withoutTags := s.withoutTags + + if len(withoutTags) == 0 { + for i, tag := range with { + if tag == "" { + continue + } + + if !hasTag(withTags, tag) { + newTags = append(newTags, i) + } + } + } else { + for i, tag := range with { + if tag == "" { + continue + } + + if !hasTag(withTags, tag) { + if !hasTag(withoutTags, tag) { + newTags = append(newTags, i) + } + } + } + } + + if len(newTags) == 0 { + return + } + + // Merge with tags. + withTags = make([]string, 0, len(s.withTags)+len(newTags)) + withTags = append(withTags, s.withTags...) + + for _, idx := range newTags { + withTags = append(withTags, with[idx]) + } + + sort.Strings(withTags) + withTags = removeDuplicatedTags(withTags) + s.withTags = withTags +} + +// WithoutTag sets excluded tag(s) for all builder methods. +// For instance, calling s.WithoutTag("tag").SelectFrom("t") is to select all fields except those tagged with "tag" from table "t". +// +// If multiple tags are provided, fields tagged with any of them are excluded. +// That is, s.WithoutTag("tag1", "tag2").SelectFrom("t") is to exclude any field tagged with "tag1" or "tag2" from table "t". +func (s *Struct) WithoutTag(tags ...string) *Struct { + if len(tags) == 0 { + return s + } + + c := *s + c.mergeWithoutTags(tags) + return &c +} + +func (s *Struct) mergeWithoutTags(without []string) { + withTags := s.withTags + withoutTags := s.withoutTags + + if len(withoutTags) == 0 { + withoutTags = make([]string, len(without)) + copy(withoutTags, without) + } else { + newTags := make([]int, 0, len(without)) + + for i, tag := range without { + if tag == "" { + continue + } + + if !hasTag(withoutTags, tag) { + newTags = append(newTags, i) + } + } + + if len(newTags) == 0 { + return + + } + + // Merge without tags. + tags := make([]string, 0, len(withoutTags)+len(newTags)) + tags = append(tags, withoutTags...) + + for _, idx := range newTags { + tags = append(tags, without[idx]) + } + + withoutTags = tags + } + + sort.Strings(withoutTags) + withoutTags = removeDuplicatedTags(withoutTags) + + // Filter out useless tags in s.withTags. + kept := make([]int, 0, len(withTags)) + + for i, tag := range withTags { + if !hasTag(withoutTags, tag) { + kept = append(kept, i) + } + } + + if len(kept) > 0 { + filteredTags := make([]string, 0, len(kept)) + + for _, i := range kept { + filteredTags = append(filteredTags, withTags[i]) + } + + withTags = filteredTags + } else { + withTags = nil + } + + // Update with and without tags. + s.withTags = withTags + s.withoutTags = withoutTags +} + +func hasTag(tags []string, tag string) bool { + if len(tags) == 0 { + return false + } + + i := sort.SearchStrings(tags, tag) + return i < len(tags) && tags[i] == tag +} + +func removeDuplicatedTags(tags []string) []string { + if len(tags) <= 1 { + return tags + } + + // Unlikely to find any duplicates. + hasDupes := false + + for i := 1; i < len(tags); i++ { + if tags[i] == tags[i-1] { + hasDupes = true + break + } + } + + if !hasDupes { + return tags + } + + unique := make([]string, 0, len(tags)) + unique = append(unique, tags[0]) + + for i := 1; i < len(tags); i++ { + if tags[i] != tags[i-1] { + unique = append(unique, tags[i]) + } + } + + return unique +} + +// SelectFrom creates a new `SelectBuilder` with table name. 
+// By default, all exported fields of the s are listed as columns in SELECT. +// +// Caller is responsible to set WHERE condition to find right record. +func (s *Struct) SelectFrom(table string) *SelectBuilder { + return s.selectFromWithTags(table, s.withTags, s.withoutTags) +} + +// SelectFromForTag creates a new `SelectBuilder` with table name for a specified tag. +// By default, all fields of the s tagged with tag are listed as columns in SELECT. +// +// Caller is responsible to set WHERE condition to find right record. +// +// Deprecated: It's recommended to use s.WithTag(tag).SelectFrom(...) instead of calling this method. +// The former one is more readable and can be chained with other methods. +func (s *Struct) SelectFromForTag(table string, tag string) (sb *SelectBuilder) { + return s.selectFromWithTags(table, []string{tag}, nil) +} + +func (s *Struct) selectFromWithTags(table string, with, without []string) (sb *SelectBuilder) { + sfs := s.structFieldsParser() + tagged := sfs.FilterTags(with, without) + + sb = s.Flavor.NewSelectBuilder() + sb.From(table) + + if tagged == nil { + sb.Select("*") + return + } + + buf := newStringBuilder() + cols := make([]string, 0, len(tagged.ForRead)) + tableAlias := parseTableAlias(table) + + for _, sf := range tagged.ForRead { + if s.Flavor != CQL && !strings.ContainsRune(sf.Alias, '.') { + buf.WriteString(tableAlias) + buf.WriteRune('.') + } + buf.WriteString(sf.NameForSelect(s.Flavor)) + + cols = append(cols, buf.String()) + buf.Reset() + } + + sb.Select(cols...) + return sb +} + +func parseTableAlias(table string) string { + idx := strings.LastIndex(table, " ") + + if idx == -1 { + return table + } + + return table[idx+1:] +} + +// Update creates a new `UpdateBuilder` with table name. +// By default, all exported fields of the s is assigned in UPDATE with the field values from value. +// If value's type is not the same as that of s, Update returns a dummy `UpdateBuilder` with table name. +// +// Caller is responsible to set WHERE condition to match right record. +func (s *Struct) Update(table string, value interface{}) *UpdateBuilder { + return s.updateWithTags(table, s.withTags, s.withoutTags, value) +} + +// UpdateForTag creates a new `UpdateBuilder` with table name. +// By default, all fields of the s tagged with tag is assigned in UPDATE with the field values from value. +// If value's type is not the same as that of s, UpdateForTag returns a dummy `UpdateBuilder` with table name. +// +// Caller is responsible to set WHERE condition to match right record. +// +// Deprecated: It's recommended to use s.WithTag(tag).Update(...) instead of calling this method. +// The former one is more readable and can be chained with other methods. +func (s *Struct) UpdateForTag(table string, tag string, value interface{}) *UpdateBuilder { + return s.updateWithTags(table, []string{tag}, nil, value) +} + +func (s *Struct) updateWithTags(table string, with, without []string, value interface{}) *UpdateBuilder { + sfs := s.structFieldsParser() + tagged := sfs.FilterTags(with, without) + + ub := s.Flavor.NewUpdateBuilder() + ub.Update(table) + + if tagged == nil { + return ub + } + + v := reflect.ValueOf(value) + v = dereferencedValue(v) + + if v.Type() != s.structType { + return ub + } + + assignments := make([]string, 0, len(tagged.ForWrite)) + + for _, sf := range tagged.ForWrite { + name := sf.Name + val := v.FieldByName(name) + + if isEmptyValue(val) { + if sf.ShouldOmitEmpty(with...) 
{ + continue + } + } else { + val = dereferencedFieldValue(val) + } + + data := val.Interface() + assignments = append(assignments, ub.Assign(sf.Quote(s.Flavor), data)) + } + + ub.Set(assignments...) + return ub +} + +// InsertInto creates a new `InsertBuilder` with table name using verb INSERT INTO. +// By default, all exported fields of s are set as columns by calling `InsertBuilder#Cols`, +// and value is added as a list of values by calling `InsertBuilder#Values`. +// +// InsertInto never returns any error. +// If the type of any item in value is not expected, it will be ignored. +// If value is an empty slice, `InsertBuilder#Values` will not be called. +func (s *Struct) InsertInto(table string, value ...interface{}) *InsertBuilder { + ib := s.Flavor.NewInsertBuilder() + ib.InsertInto(table) + + s.buildColsAndValuesForTag(ib, s.withTags, s.withoutTags, value...) + return ib +} + +// InsertIgnoreInto creates a new `InsertBuilder` with table name using verb INSERT IGNORE INTO. +// By default, all exported fields of s are set as columns by calling `InsertBuilder#Cols`, +// and value is added as a list of values by calling `InsertBuilder#Values`. +// +// InsertIgnoreInto never returns any error. +// If the type of any item in value is not expected, it will be ignored. +// If value is an empty slice, `InsertBuilder#Values` will not be called. +func (s *Struct) InsertIgnoreInto(table string, value ...interface{}) *InsertBuilder { + ib := s.Flavor.NewInsertBuilder() + ib.InsertIgnoreInto(table) + + s.buildColsAndValuesForTag(ib, s.withTags, s.withoutTags, value...) + return ib +} + +// ReplaceInto creates a new `InsertBuilder` with table name using verb REPLACE INTO. +// By default, all exported fields of s are set as columns by calling `InsertBuilder#Cols`, +// and value is added as a list of values by calling `InsertBuilder#Values`. +// +// ReplaceInto never returns any error. +// If the type of any item in value is not expected, it will be ignored. +// If value is an empty slice, `InsertBuilder#Values` will not be called. +func (s *Struct) ReplaceInto(table string, value ...interface{}) *InsertBuilder { + ib := s.Flavor.NewInsertBuilder() + ib.ReplaceInto(table) + + s.buildColsAndValuesForTag(ib, s.withTags, s.withoutTags, value...) + return ib +} + +// buildColsAndValuesForTag uses ib to set exported fields tagged with tag as columns +// and add value as a list of values. +func (s *Struct) buildColsAndValuesForTag(ib *InsertBuilder, with, without []string, value ...interface{}) { + sfs := s.structFieldsParser() + tagged := sfs.FilterTags(with, without) + + if tagged == nil { + return + } + + vs := make([]reflect.Value, 0, len(value)) + + for _, item := range value { + v := reflect.ValueOf(item) + v = dereferencedFieldValue(v) + + if v.Type() == s.structType { + vs = append(vs, v) + } + } + + if len(vs) == 0 { + return + } + + cols := make([]string, 0, len(tagged.ForWrite)) + values := make([][]interface{}, len(vs)) + nilCols := make([]int, 0, len(tagged.ForWrite)) + + for _, sf := range tagged.ForWrite { + cols = append(cols, sf.Quote(s.Flavor)) + name := sf.Name + shouldOmitEmpty := sf.ShouldOmitEmpty(with...) 
+ nilCnt := 0 + + for i, v := range vs { + val := v.FieldByName(name) + + if isEmptyValue(val) && shouldOmitEmpty { + nilCnt++ + } + + val = dereferencedFieldValue(val) + + if val.IsValid() { + values[i] = append(values[i], val.Interface()) + } else { + values[i] = append(values[i], nil) + } + } + + nilCols = append(nilCols, nilCnt) + } + + // Try to filter out nil values if possible. + filteredCols := make([]string, 0, len(cols)) + filteredValues := make([][]interface{}, len(values)) + + for i, cnt := range nilCols { + // If all values are nil in a column, ignore the column completely. + if cnt == len(values) { + continue + } + + filteredCols = append(filteredCols, cols[i]) + + for n, value := range values { + filteredValues[n] = append(filteredValues[n], value[i]) + } + } + + ib.Cols(filteredCols...) + + for _, value := range filteredValues { + ib.Values(value...) + } +} + +// InsertIntoForTag creates a new `InsertBuilder` with table name using verb INSERT INTO. +// By default, exported fields tagged with tag are set as columns by calling `InsertBuilder#Cols`, +// and value is added as a list of values by calling `InsertBuilder#Values`. +// +// InsertIntoForTag never returns any error. +// If the type of any item in value is not expected, it will be ignored. +// If value is an empty slice, `InsertBuilder#Values` will not be called. +// +// Deprecated: It's recommended to use s.WithTag(tag).InsertInto(...) instead of calling this method. +// The former one is more readable and can be chained with other methods. +func (s *Struct) InsertIntoForTag(table string, tag string, value ...interface{}) *InsertBuilder { + ib := s.Flavor.NewInsertBuilder() + ib.InsertInto(table) + + s.buildColsAndValuesForTag(ib, []string{tag}, nil, value...) + return ib +} + +// InsertIgnoreIntoForTag creates a new `InsertBuilder` with table name using verb INSERT IGNORE INTO. +// By default, exported fields tagged with tag are set as columns by calling `InsertBuilder#Cols`, +// and value is added as a list of values by calling `InsertBuilder#Values`. +// +// InsertIgnoreIntoForTag never returns any error. +// If the type of any item in value is not expected, it will be ignored. +// If value is an empty slice, `InsertBuilder#Values` will not be called. +// +// Deprecated: It's recommended to use s.WithTag(tag).InsertIgnoreInto(...) instead of calling this method. +// The former one is more readable and can be chained with other methods. +func (s *Struct) InsertIgnoreIntoForTag(table string, tag string, value ...interface{}) *InsertBuilder { + ib := s.Flavor.NewInsertBuilder() + ib.InsertIgnoreInto(table) + + s.buildColsAndValuesForTag(ib, []string{tag}, nil, value...) + return ib +} + +// ReplaceIntoForTag creates a new `InsertBuilder` with table name using verb REPLACE INTO. +// By default, exported fields tagged with tag are set as columns by calling `InsertBuilder#Cols`, +// and value is added as a list of values by calling `InsertBuilder#Values`. +// +// ReplaceIntoForTag never returns any error. +// If the type of any item in value is not expected, it will be ignored. +// If value is an empty slice, `InsertBuilder#Values` will not be called. +// +// Deprecated: It's recommended to use s.WithTag(tag).ReplaceInto(...) instead of calling this method. +// The former one is more readable and can be chained with other methods. 
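To make the omitempty handling above concrete, a sketch with an invented Event type: a column is dropped from the INSERT only when the tagged field is empty in every inserted value.

    type Event struct {
        ID   int64  `db:"id"`
        Name string `db:"name"`
        Note string `db:"note" fieldopt:"omitempty"`
    }

    s := sqlbuilder.NewStruct(new(Event))
    query, args := s.InsertInto("event",
        &Event{ID: 1, Name: "a"},
        &Event{ID: 2, Name: "b"},
    ).Build()
    fmt.Println(query, args)
    // note is empty in both rows, so the whole column is filtered out:
    //   INSERT INTO event (id, name) VALUES (?, ?), (?, ?)   [1 a 2 b]
    // If any row had a non-empty note, the column would be kept for
    // all rows.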
+func (s *Struct) ReplaceIntoForTag(table string, tag string, value ...interface{}) *InsertBuilder { + ib := s.Flavor.NewInsertBuilder() + ib.ReplaceInto(table) + + s.buildColsAndValuesForTag(ib, []string{tag}, nil, value...) + return ib +} + +// DeleteFrom creates a new `DeleteBuilder` with table name. +// +// Caller is responsible to set WHERE condition to match right record. +func (s *Struct) DeleteFrom(table string) *DeleteBuilder { + db := s.Flavor.NewDeleteBuilder() + db.DeleteFrom(table) + return db +} + +// Addr takes address of all exported fields of the s from the st. +// The returned result can be used in `Row#Scan` directly. +func (s *Struct) Addr(st interface{}) []interface{} { + return s.addrWithTags(s.withTags, s.withoutTags, st) +} + +// AddrForTag takes address of all fields of the s tagged with tag from the st. +// The returned value can be used in `Row#Scan` directly. +// +// If tag is not defined in s in advance, returns nil. +// +// Deprecated: It's recommended to use s.WithTag(tag).Addr(...) instead of calling this method. +// The former one is more readable and can be chained with other methods. +func (s *Struct) AddrForTag(tag string, st interface{}) []interface{} { + return s.addrWithTags([]string{tag}, nil, st) +} + +func (s *Struct) addrWithTags(with, without []string, st interface{}) []interface{} { + sfs := s.structFieldsParser() + tagged := sfs.FilterTags(with, without) + + if tagged == nil { + return nil + } + + return s.addrWithFields(tagged.ForRead, st) +} + +// AddrWithCols takes address of all columns defined in cols from the st. +// The returned value can be used in `Row#Scan` directly. +func (s *Struct) AddrWithCols(cols []string, st interface{}) []interface{} { + sfs := s.structFieldsParser() + tagged := sfs.FilterTags(s.withTags, s.withoutTags) + + if tagged == nil { + return nil + } + + fields := tagged.Cols(cols) + + if fields == nil { + return nil + } + + return s.addrWithFields(fields, st) +} + +func (s *Struct) addrWithFields(fields []*structField, st interface{}) []interface{} { + v := reflect.ValueOf(st) + v = dereferencedValue(v) + + if v.Type() != s.structType { + return nil + } + + addrs := make([]interface{}, 0, len(fields)) + + for _, sf := range fields { + name := sf.Name + data := v.FieldByName(name).Addr().Interface() + addrs = append(addrs, data) + } + + return addrs +} + +// Columns returns column names of s for all exported struct fields. +func (s *Struct) Columns() []string { + return s.columnsWithTags(s.withTags, s.withoutTags) +} + +// ColumnsForTag returns column names of the s tagged with tag. +// +// Deprecated: It's recommended to use s.WithTag(tag).Columns(...) instead of calling this method. +// The former one is more readable and can be chained with other methods. +func (s *Struct) ColumnsForTag(tag string) (cols []string) { + return s.columnsWithTags([]string{tag}, nil) +} + +func (s *Struct) columnsWithTags(with, without []string) (cols []string) { + sfs := s.structFieldsParser() + tagged := sfs.FilterTags(with, without) + + if tagged == nil { + return + } + + cols = make([]string, 0, len(tagged.ForWrite)) + + for _, sf := range tagged.ForWrite { + cols = append(cols, sf.Alias) + } + + return +} + +// Values returns a shadow copy of all exported fields in st. +func (s *Struct) Values(st interface{}) []interface{} { + return s.valuesWithTags(s.withTags, s.withoutTags, st) +} + +// ValuesForTag returns a shadow copy of all fields tagged with tag in st. +// +// Deprecated: It's recommended to use s.WithTag(tag).Values(...) 
instead of calling this method. +// The former one is more readable and can be chained with other methods. +func (s *Struct) ValuesForTag(tag string, value interface{}) (values []interface{}) { + return s.valuesWithTags([]string{tag}, nil, value) +} + +func (s *Struct) valuesWithTags(with, without []string, value interface{}) (values []interface{}) { + sfs := s.structFieldsParser() + tagged := sfs.FilterTags(with, without) + + if tagged == nil { + return + } + + v := reflect.ValueOf(value) + v = dereferencedValue(v) + + if v.Type() != s.structType { + return + } + + values = make([]interface{}, 0, len(tagged.ForWrite)) + + for _, sf := range tagged.ForWrite { + name := sf.Name + data := v.FieldByName(name).Interface() + values = append(values, data) + } + + return +} + +// ForeachRead foreach tags. +func (s *Struct) ForeachRead(trans func(dbtag string, isQuoted bool, field reflect.StructField)) { + s.foreachReadWithTags(s.withTags, s.withoutTags, trans) +} + +func (s *Struct) foreachReadWithTags(with, without []string, trans func(dbtag string, isQuoted bool, field reflect.StructField)) { + sfs := s.structFieldsParser() + tagged := sfs.FilterTags(with, without) + if tagged == nil { + return + } + for _, sf := range tagged.ForRead { + trans(sf.DBTag, sf.IsQuoted, sf.Field) + } +} + +// ForeachWrite foreach tags. +func (s *Struct) ForeachWrite(trans func(dbtag string, isQuoted bool, field reflect.StructField)) { + s.foreachWriteWithTags(s.withTags, s.withoutTags, trans) +} + +func (s *Struct) foreachWriteWithTags(with, without []string, trans func(dbtag string, isQuoted bool, field reflect.StructField)) { + sfs := s.structFieldsParser() + tagged := sfs.FilterTags(with, without) + if tagged == nil { + return + } + for _, sf := range tagged.ForWrite { + trans(sf.DBTag, sf.IsQuoted, sf.Field) + } +} + +func dereferencedType(t reflect.Type) reflect.Type { + for k := t.Kind(); k == reflect.Ptr || k == reflect.Interface; k = t.Kind() { + t = t.Elem() + } + + return t +} + +func dereferencedValue(v reflect.Value) reflect.Value { + for k := v.Kind(); k == reflect.Ptr || k == reflect.Interface; k = v.Kind() { + v = v.Elem() + } + + return v +} + +func dereferencedFieldValue(v reflect.Value) reflect.Value { + for k := v.Kind(); k == reflect.Ptr || k == reflect.Interface; k = v.Kind() { + if v.Type().Implements(typeOfSQLDriverValuer) { + break + } + + v = v.Elem() + } + + return v +} + +// isEmptyValue checks if v is zero. +// Following code is borrowed from `IsZero` method in `reflect.Value` since Go 1.13. 
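A sketch of the intended Scan workflow for Addr, reusing the invented User type from the earlier sketch; db is an opened *sql.DB:

    func loadUser(db *sql.DB, id int64) (*User, error) {
        s := sqlbuilder.NewStruct(new(User))
        sb := s.SelectFrom("user")
        sb.Where(sb.Equal("id", id))

        query, args := sb.Build()
        var u User
        // Addr returns field addresses in the same order as the
        // columns produced by SelectFrom, so Scan fills u directly.
        if err := db.QueryRow(query, args...).Scan(s.Addr(&u)...); err != nil {
            return nil, err
        }
        return &u, nil
    }

(Assumes import "database/sql" alongside the sqlbuilder package.)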
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isEmptyValue(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isEmptyValue(v.Field(i)) { + return false + } + } + return true + } + + return false +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/structfields.go b/vendor/github.com/huandu/go-sqlbuilder/structfields.go new file mode 100644 index 000000000..e64904cef --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/structfields.go @@ -0,0 +1,351 @@ +package sqlbuilder + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +type structFields struct { + noTag *structTaggedFields + tagged map[string]*structTaggedFields +} + +type structTaggedFields struct { + // All columns for SELECT. + ForRead []*structField + colsForRead map[string]*structField + + // All columns which can be used in INSERT and UPDATE. + ForWrite []*structField + colsForWrite map[string]struct{} +} + +type structField struct { + Name string + Alias string + As string + Tags []string + IsQuoted bool + DBTag string + Field reflect.StructField + + omitEmptyTags omitEmptyTagMap +} + +type structFieldsParser func() *structFields + +func makeDefaultFieldsParser(t reflect.Type) structFieldsParser { + return makeFieldsParser(t, nil, true) +} + +func makeCustomFieldsParser(t reflect.Type, mapper FieldMapperFunc) structFieldsParser { + return makeFieldsParser(t, mapper, false) +} + +func makeFieldsParser(t reflect.Type, mapper FieldMapperFunc, useDefault bool) structFieldsParser { + var once sync.Once + sfs := &structFields{ + noTag: makeStructTaggedFields(), + tagged: map[string]*structTaggedFields{}, + } + + return func() *structFields { + once.Do(func() { + if useDefault { + mapper = DefaultFieldMapper + } + + sfs.parse(t, mapper, "") + }) + + return sfs + } +} + +func (sfs *structFields) parse(t reflect.Type, mapper FieldMapperFunc, prefix string) { + l := t.NumField() + var anonymous []reflect.StructField + + for i := 0; i < l; i++ { + field := t.Field(i) + + // Skip unexported fields that are not embedded structs. + if field.PkgPath != "" && !field.Anonymous { + continue + } + + if field.Anonymous { + ft := field.Type + + // If field is an anonymous struct or pointer to struct, parse it later. + if k := ft.Kind(); k == reflect.Struct || (k == reflect.Ptr && ft.Elem().Kind() == reflect.Struct) { + anonymous = append(anonymous, field) + continue + } + } + + // Parse DBTag. + alias, dbtag := DefaultGetAlias(&field) + + if alias == "-" { + continue + } + + if alias == "" { + alias = field.Name + if mapper != nil { + alias = mapper(alias) + } + } + + // Parse FieldOpt. 
+ fieldopt := field.Tag.Get(FieldOpt) + opts := optRegex.FindAllString(fieldopt, -1) + isQuoted := false + omitEmptyTags := omitEmptyTagMap{} + + for _, opt := range opts { + optMap := getOptMatchedMap(opt) + + switch optMap[optName] { + case fieldOptOmitEmpty: + tags := getTagsFromOptParams(optMap[optParams]) + + for _, tag := range tags { + omitEmptyTags[tag] = struct{}{} + } + + case fieldOptWithQuote: + isQuoted = true + } + } + + // Parse FieldAs. + fieldas := field.Tag.Get(FieldAs) + + // Parse FieldTag. + fieldtag := field.Tag.Get(FieldTag) + tags := splitTags(fieldtag) + + // Make struct field. + structField := &structField{ + Name: field.Name, + Alias: alias, + As: fieldas, + Tags: tags, + IsQuoted: isQuoted, + DBTag: dbtag, + Field: field, + omitEmptyTags: omitEmptyTags, + } + + // Make sure all fields can be added to noTag without conflict. + sfs.noTag.Add(structField) + + for _, tag := range tags { + sfs.taggedFields(tag).Add(structField) + } + } + + for _, field := range anonymous { + ft := dereferencedType(field.Type) + sfs.parse(ft, mapper, prefix+field.Name+".") + } +} + +func (sfs *structFields) FilterTags(with, without []string) *structTaggedFields { + if len(with) == 0 && len(without) == 0 { + return sfs.noTag + } + + // Simply return the tagged fields. + if len(with) == 1 && len(without) == 0 { + return sfs.tagged[with[0]] + } + + // Find out all with and without fields. + taggedFields := makeStructTaggedFields() + filteredReadFields := make(map[string]struct{}, len(sfs.noTag.colsForRead)) + + for _, tag := range without { + if field, ok := sfs.tagged[tag]; ok { + for k := range field.colsForRead { + filteredReadFields[k] = struct{}{} + } + } + } + + if len(with) == 0 { + for _, field := range sfs.noTag.ForRead { + k := field.Key() + + if _, ok := filteredReadFields[k]; !ok { + taggedFields.Add(field) + } + } + } else { + for _, tag := range with { + if fields, ok := sfs.tagged[tag]; ok { + for _, field := range fields.ForRead { + k := field.Key() + + if _, ok := filteredReadFields[k]; !ok { + taggedFields.Add(field) + } + } + } + } + } + + return taggedFields +} + +func (sfs *structFields) taggedFields(tag string) *structTaggedFields { + fields, ok := sfs.tagged[tag] + + if !ok { + fields = makeStructTaggedFields() + sfs.tagged[tag] = fields + } + + return fields +} + +func makeStructTaggedFields() *structTaggedFields { + return &structTaggedFields{ + colsForRead: map[string]*structField{}, + colsForWrite: map[string]struct{}{}, + } +} + +// Add a new field to stfs. +// If field's key exists in stfs.fields, the field is ignored. +func (stfs *structTaggedFields) Add(field *structField) { + key := field.Key() + + if _, ok := stfs.colsForRead[key]; !ok { + stfs.colsForRead[key] = field + stfs.ForRead = append(stfs.ForRead, field) + } + + key = field.Alias + + if _, ok := stfs.colsForWrite[key]; !ok { + stfs.colsForWrite[key] = struct{}{} + stfs.ForWrite = append(stfs.ForWrite, field) + } +} + +// Cols returns the fields whose key is one of cols. +// If any column in cols doesn't exist in sfs.fields, returns nil. +func (stfs *structTaggedFields) Cols(cols []string) []*structField { + fields := make([]*structField, 0, len(cols)) + + for _, col := range cols { + field := stfs.colsForRead[col] + + if field == nil { + return nil + } + + fields = append(fields, field) + } + + return fields +} + +// Key returns the key name to identify a field. 
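A sketch of how the fieldas tag and the withquote option parsed above surface in a generated SELECT. The Account type is invented, and exact quoting depends on the flavor:

    type Account struct {
        ID    int64  `db:"id" fieldopt:"withquote"`
        Email string `db:"email" fieldas:"contact"`
    }

    s := sqlbuilder.NewStruct(new(Account)).For(sqlbuilder.MySQL)
    query, _ := s.SelectFrom("account").Build()
    fmt.Println(query)
    // id is quoted for MySQL and email is aliased, roughly:
    //   SELECT account.`id`, account.email AS contact FROM account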
+func (sf *structField) Key() string { + if sf.As != "" { + return sf.As + } + + if sf.Alias != "" { + return sf.Alias + } + + return sf.Name +} + +// NameForSelect returns the name for SELECT. +func (sf *structField) NameForSelect(flavor Flavor) string { + if sf.As == "" { + return sf.Quote(flavor) + } + + return fmt.Sprintf("%s AS %s", sf.Quote(flavor), sf.As) +} + +// Quote the Alias in sf with flavor. +func (sf *structField) Quote(flavor Flavor) string { + if !sf.IsQuoted { + return sf.Alias + } + + return flavor.Quote(sf.Alias) +} + +// ShouldOmitEmpty returns true only if any one of tags is in the omitted tags map. +func (sf *structField) ShouldOmitEmpty(tags ...string) (ret bool) { + omit := sf.omitEmptyTags + + if len(omit) == 0 { + return + } + + // Always check default tag. + if _, ret = omit[""]; ret { + return + } + + for _, tag := range tags { + if _, ret = omit[tag]; ret { + return + } + } + + return +} + +type omitEmptyTagMap map[string]struct{} + +func getOptMatchedMap(opt string) (res map[string]string) { + res = map[string]string{} + sm := optRegex.FindStringSubmatch(opt) + + for i, name := range optRegex.SubexpNames() { + if name != "" { + res[name] = sm[i] + } + } + + return +} + +func getTagsFromOptParams(opts string) (tags []string) { + tags = splitTags(opts) + + if len(tags) == 0 { + tags = append(tags, "") + } + + return +} + +func splitTags(fieldtag string) (tags []string) { + parts := strings.Split(fieldtag, ",") + + for _, v := range parts { + tag := strings.TrimSpace(v) + + if tag == "" { + continue + } + + tags = append(tags, tag) + } + + return +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/union.go b/vendor/github.com/huandu/go-sqlbuilder/union.go new file mode 100644 index 000000000..01a946ff9 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/union.go @@ -0,0 +1,362 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "fmt" + "reflect" + + "github.com/huandu/go-clone" +) + +const ( + unionDistinct = " UNION " // Default union type is DISTINCT. + unionAll = " UNION ALL " +) + +const ( + unionMarkerInit injectionMarker = iota + unionMarkerAfterUnion + unionMarkerAfterOrderBy + unionMarkerAfterLimit +) + +// NewUnionBuilder creates a new UNION builder. +func NewUnionBuilder() *UnionBuilder { + return DefaultFlavor.NewUnionBuilder() +} + +func newUnionBuilder() *UnionBuilder { + return &UnionBuilder{ + args: &Args{}, + injection: newInjection(), + } +} + +// Clone returns a deep copy of UnionBuilder. +// It's useful when you want to create a base builder and clone it to build similar queries. +func (ub *UnionBuilder) Clone() *UnionBuilder { + return clone.Clone(ub).(*UnionBuilder) +} + +func init() { + t := reflect.TypeOf(UnionBuilder{}) + clone.SetCustomFunc(t, func(allocator *clone.Allocator, old, new reflect.Value) { + cloned := allocator.CloneSlowly(old) + new.Set(cloned) + + ub := cloned.Addr().Interface().(*UnionBuilder) + for i, b := range ub.builders { + ub.args.Replace(ub.builderVars[i], b) + } + }) +} + +// UnionBuilder is a builder to build UNION. 
+type UnionBuilder struct { + opt string + orderByCols []string + order string + limitVar string + offsetVar string + + builders []Builder + builderVars []string + + args *Args + + injection *injection + marker injectionMarker +} + +var _ Builder = new(UnionBuilder) + +// Union unions all builders together using UNION operator. +func Union(builders ...Builder) *UnionBuilder { + return DefaultFlavor.NewUnionBuilder().Union(builders...) +} + +// Union unions all builders together using UNION operator. +func (ub *UnionBuilder) Union(builders ...Builder) *UnionBuilder { + return ub.union(unionDistinct, builders...) +} + +// UnionAll unions all builders together using UNION ALL operator. +func UnionAll(builders ...Builder) *UnionBuilder { + return DefaultFlavor.NewUnionBuilder().UnionAll(builders...) +} + +// UnionAll unions all builders together using UNION ALL operator. +func (ub *UnionBuilder) UnionAll(builders ...Builder) *UnionBuilder { + return ub.union(unionAll, builders...) +} + +func (ub *UnionBuilder) union(opt string, builders ...Builder) *UnionBuilder { + builderVars := make([]string, 0, len(builders)) + + for _, b := range builders { + builderVars = append(builderVars, ub.Var(b)) + } + + ub.opt = opt + ub.builders = append([]Builder(nil), builders...) + ub.builderVars = builderVars + ub.marker = unionMarkerAfterUnion + return ub +} + +// OrderBy sets columns of ORDER BY in SELECT. +func (ub *UnionBuilder) OrderBy(col ...string) *UnionBuilder { + ub.orderByCols = col + ub.marker = unionMarkerAfterOrderBy + return ub +} + +// Asc sets order of ORDER BY to ASC. +func (ub *UnionBuilder) Asc() *UnionBuilder { + ub.order = "ASC" + ub.marker = unionMarkerAfterOrderBy + return ub +} + +// Desc sets order of ORDER BY to DESC. +func (ub *UnionBuilder) Desc() *UnionBuilder { + ub.order = "DESC" + ub.marker = unionMarkerAfterOrderBy + return ub +} + +// Limit sets the LIMIT in SELECT. +func (ub *UnionBuilder) Limit(limit int) *UnionBuilder { + if limit < 0 { + ub.limitVar = "" + return ub + } + + ub.limitVar = ub.Var(limit) + ub.marker = unionMarkerAfterLimit + return ub +} + +// Offset sets the LIMIT offset in SELECT. +func (ub *UnionBuilder) Offset(offset int) *UnionBuilder { + if offset < 0 { + ub.offsetVar = "" + return ub + } + + ub.offsetVar = ub.Var(offset) + ub.marker = unionMarkerAfterLimit + return ub +} + +// String returns the compiled SELECT string. +func (ub *UnionBuilder) String() string { + s, _ := ub.Build() + return s +} + +// Build returns compiled SELECT string and args. +// They can be used in `DB#Query` of package `database/sql` directly. +func (ub *UnionBuilder) Build() (sql string, args []interface{}) { + return ub.BuildWithFlavor(ub.args.Flavor) +} + +// BuildWithFlavor returns compiled SELECT string and args with flavor and initial args. +// They can be used in `DB#Query` of package `database/sql` directly. 
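A sketch of the UNION API above with invented table names; the default flavor wraps each sub-select in parentheses:

    active := sqlbuilder.Select("id", "name").From("active_users")
    archived := sqlbuilder.Select("id", "name").From("archived_users")

    ub := sqlbuilder.UnionAll(active, archived)
    ub.OrderBy("id").Asc().Limit(100)

    query, args := ub.Build()
    fmt.Println(query, args)
    // (SELECT id, name FROM active_users) UNION ALL
    // (SELECT id, name FROM archived_users) ORDER BY id ASC LIMIT ?
    // with args [100].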
+func (ub *UnionBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + buf := newStringBuilder() + ub.injection.WriteTo(buf, unionMarkerInit) + + nestedSelect := (flavor == Oracle && (len(ub.limitVar) > 0 || len(ub.offsetVar) > 0)) || + (flavor == Informix && len(ub.limitVar) > 0) + + if len(ub.builderVars) > 0 { + needParen := flavor != SQLite + + if nestedSelect { + buf.WriteLeadingString("SELECT * FROM (") + } + + if needParen { + buf.WriteLeadingString("(") + buf.WriteString(ub.builderVars[0]) + buf.WriteRune(')') + } else { + buf.WriteLeadingString(ub.builderVars[0]) + } + + for _, b := range ub.builderVars[1:] { + buf.WriteString(ub.opt) + + if needParen { + buf.WriteRune('(') + } + + buf.WriteString(b) + + if needParen { + buf.WriteRune(')') + } + } + + if nestedSelect { + buf.WriteLeadingString(")") + } + } + + ub.injection.WriteTo(buf, unionMarkerAfterUnion) + + if len(ub.orderByCols) > 0 { + buf.WriteLeadingString("ORDER BY ") + buf.WriteStrings(ub.orderByCols, ", ") + + if ub.order != "" { + buf.WriteRune(' ') + buf.WriteString(ub.order) + } + + ub.injection.WriteTo(buf, unionMarkerAfterOrderBy) + } + + switch flavor { + case MySQL, SQLite, ClickHouse: + if len(ub.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(ub.limitVar) + + if len(ub.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(ub.offsetVar) + } + } + + case CQL: + if len(ub.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(ub.limitVar) + } + + case PostgreSQL: + if len(ub.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(ub.limitVar) + } + + if len(ub.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(ub.offsetVar) + } + + case Presto: + // There might be a hidden constraint in Presto requiring offset to be set before limit. + // The select statement documentation (https://prestodb.io/docs/current/sql/select.html) + // puts offset before limit, and Trino, which is based on Presto, seems + // to require this specific order. + if len(ub.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(ub.offsetVar) + } + + if len(ub.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(ub.limitVar) + } + + case SQLServer: + // If ORDER BY is not set, sort column #1 by default. + // It's required to make OFFSET...FETCH work. + if len(ub.orderByCols) == 0 && (len(ub.limitVar) > 0 || len(ub.offsetVar) > 0) { + buf.WriteLeadingString("ORDER BY 1") + } + + if len(ub.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(ub.offsetVar) + buf.WriteString(" ROWS") + } + + if len(ub.limitVar) > 0 { + if len(ub.offsetVar) == 0 { + buf.WriteLeadingString("OFFSET 0 ROWS") + } + + buf.WriteLeadingString("FETCH NEXT ") + buf.WriteString(ub.limitVar) + buf.WriteString(" ROWS ONLY") + } + + case Oracle: + // It's required to make OFFSET...FETCH work. 
+ if len(ub.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(ub.offsetVar) + buf.WriteString(" ROWS") + } + + if len(ub.limitVar) > 0 { + if len(ub.offsetVar) == 0 { + buf.WriteLeadingString("OFFSET 0 ROWS") + } + + buf.WriteLeadingString("FETCH NEXT ") + buf.WriteString(ub.limitVar) + buf.WriteString(" ROWS ONLY") + } + + case Informix: + // [SKIP N] FIRST M + // M must be greater than 0 + if len(ub.limitVar) > 0 { + if len(ub.offsetVar) > 0 { + buf.WriteLeadingString("SKIP ") + buf.WriteString(ub.offsetVar) + } + + buf.WriteLeadingString("FIRST ") + buf.WriteString(ub.limitVar) + } + + case Doris: + // #192: Doris doesn't support ? in OFFSET and LIMIT. + if len(ub.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(fmt.Sprint(ub.args.Value(ub.limitVar))) + + if len(ub.offsetVar) > 0 { + buf.WriteLeadingString("OFFSET ") + buf.WriteString(fmt.Sprint(ub.args.Value(ub.offsetVar))) + } + } + } + + if len(ub.limitVar) > 0 { + ub.injection.WriteTo(buf, unionMarkerAfterLimit) + } + + return ub.args.CompileWithFlavor(buf.String(), flavor, initialArg...) +} + +// SetFlavor sets the flavor of compiled sql. +func (ub *UnionBuilder) SetFlavor(flavor Flavor) (old Flavor) { + old = ub.args.Flavor + ub.args.Flavor = flavor + return +} + +// Flavor returns flavor of builder +func (ub *UnionBuilder) Flavor() Flavor { + return ub.args.Flavor +} + +// Var returns a placeholder for value. +func (ub *UnionBuilder) Var(arg interface{}) string { + return ub.args.Add(arg) +} + +// SQL adds an arbitrary sql to current position. +func (ub *UnionBuilder) SQL(sql string) *UnionBuilder { + ub.injection.SQL(ub.marker, sql) + return ub +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/update.go b/vendor/github.com/huandu/go-sqlbuilder/update.go new file mode 100644 index 000000000..56e9ed77a --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/update.go @@ -0,0 +1,372 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +import ( + "fmt" + "reflect" + + "github.com/huandu/go-clone" +) + +const ( + updateMarkerInit injectionMarker = iota + updateMarkerAfterWith + updateMarkerAfterUpdate + updateMarkerAfterSet + updateMarkerAfterWhere + updateMarkerAfterOrderBy + updateMarkerAfterLimit + updateMarkerAfterReturning +) + +// NewUpdateBuilder creates a new UPDATE builder. +func NewUpdateBuilder() *UpdateBuilder { + return DefaultFlavor.NewUpdateBuilder() +} + +func newUpdateBuilder() *UpdateBuilder { + args := &Args{} + proxy := &whereClauseProxy{} + return &UpdateBuilder{ + whereClauseProxy: proxy, + whereClauseExpr: args.Add(proxy), + + Cond: Cond{ + Args: args, + }, + args: args, + injection: newInjection(), + } +} + +// Clone returns a deep copy of UpdateBuilder. +// It's useful when you want to create a base builder and clone it to build similar queries. +func (ub *UpdateBuilder) Clone() *UpdateBuilder { + return clone.Clone(ub).(*UpdateBuilder) +} + +func init() { + t := reflect.TypeOf(UpdateBuilder{}) + clone.SetCustomFunc(t, func(allocator *clone.Allocator, old, new reflect.Value) { + cloned := allocator.CloneSlowly(old) + new.Set(cloned) + + ub := cloned.Addr().Interface().(*UpdateBuilder) + ub.args.Replace(ub.whereClauseExpr, ub.whereClauseProxy) + ub.args.Replace(ub.cteBuilderVar, ub.cteBuilder) + }) +} + +// UpdateBuilder is a builder to build UPDATE. 
+type UpdateBuilder struct { + *WhereClause + Cond + + whereClauseProxy *whereClauseProxy + whereClauseExpr string + + cteBuilderVar string + cteBuilder *CTEBuilder + + tables []string + assignments []string + orderByCols []string + order string + limitVar string + returning []string + + args *Args + + injection *injection + marker injectionMarker +} + +var _ Builder = new(UpdateBuilder) + +// Update sets table name in UPDATE. +func Update(table ...string) *UpdateBuilder { + return DefaultFlavor.NewUpdateBuilder().Update(table...) +} + +// With sets WITH clause (the Common Table Expression) before UPDATE. +func (ub *UpdateBuilder) With(builder *CTEBuilder) *UpdateBuilder { + ub.marker = updateMarkerAfterWith + ub.cteBuilderVar = ub.Var(builder) + ub.cteBuilder = builder + return ub +} + +// Update sets table name in UPDATE. +func (ub *UpdateBuilder) Update(table ...string) *UpdateBuilder { + ub.tables = table + ub.marker = updateMarkerAfterUpdate + return ub +} + +// TableNames returns all table names in this UPDATE statement. +func (ub *UpdateBuilder) TableNames() (tableNames []string) { + var additionalTableNames []string + + if ub.cteBuilder != nil { + additionalTableNames = ub.cteBuilder.tableNamesForFrom() + } + + if len(ub.tables) > 0 && len(additionalTableNames) > 0 { + tableNames = make([]string, len(ub.tables)+len(additionalTableNames)) + copy(tableNames, ub.tables) + copy(tableNames[len(ub.tables):], additionalTableNames) + } else if len(ub.tables) > 0 { + tableNames = ub.tables + } else if len(additionalTableNames) > 0 { + tableNames = additionalTableNames + } + + return tableNames +} + +// Set sets the assignments in SET. +func (ub *UpdateBuilder) Set(assignment ...string) *UpdateBuilder { + ub.assignments = assignment + ub.marker = updateMarkerAfterSet + return ub +} + +// SetMore appends the assignments in SET. +func (ub *UpdateBuilder) SetMore(assignment ...string) *UpdateBuilder { + ub.assignments = append(ub.assignments, assignment...) + ub.marker = updateMarkerAfterSet + return ub +} + +// Where sets expressions of WHERE in UPDATE. +func (ub *UpdateBuilder) Where(andExpr ...string) *UpdateBuilder { + if len(andExpr) == 0 || estimateStringsBytes(andExpr) == 0 { + return ub + } + + if ub.WhereClause == nil { + ub.WhereClause = NewWhereClause() + } + + ub.WhereClause.AddWhereExpr(ub.args, andExpr...) + ub.marker = updateMarkerAfterWhere + return ub +} + +// AddWhereClause adds all clauses in the whereClause to SELECT. +func (ub *UpdateBuilder) AddWhereClause(whereClause *WhereClause) *UpdateBuilder { + if ub.WhereClause == nil { + ub.WhereClause = NewWhereClause() + } + + ub.WhereClause.AddWhereClause(whereClause) + return ub +} + +// Assign represents SET "field = value" in UPDATE. +func (ub *UpdateBuilder) Assign(field string, value interface{}) string { + return fmt.Sprintf("%s = %s", Escape(field), ub.args.Add(value)) +} + +// Incr represents SET "field = field + 1" in UPDATE. +func (ub *UpdateBuilder) Incr(field string) string { + f := Escape(field) + return fmt.Sprintf("%s = %s + 1", f, f) +} + +// Decr represents SET "field = field - 1" in UPDATE. +func (ub *UpdateBuilder) Decr(field string) string { + f := Escape(field) + return fmt.Sprintf("%s = %s - 1", f, f) +} + +// Add represents SET "field = field + value" in UPDATE. +func (ub *UpdateBuilder) Add(field string, value interface{}) string { + f := Escape(field) + return fmt.Sprintf("%s = %s + %s", f, f, ub.args.Add(value)) +} + +// Sub represents SET "field = field - value" in UPDATE. 
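A sketch combining the assignment helpers above; the table and columns are invented, and ub.Equal comes from the embedded Cond:

    ub := sqlbuilder.NewUpdateBuilder()
    ub.Update("counters").
        Set(
            ub.Incr("hits"),
            ub.Assign("status", "hot"),
            ub.Add("score", 10),
        ).
        Where(ub.Equal("id", 42))

    query, args := ub.Build()
    fmt.Println(query, args)
    // UPDATE counters SET hits = hits + 1, status = ?, score = score + ?
    // WHERE id = ?   with args [hot 10 42].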
+func (ub *UpdateBuilder) Sub(field string, value interface{}) string { + f := Escape(field) + return fmt.Sprintf("%s = %s - %s", f, f, ub.args.Add(value)) +} + +// Mul represents SET "field = field * value" in UPDATE. +func (ub *UpdateBuilder) Mul(field string, value interface{}) string { + f := Escape(field) + return fmt.Sprintf("%s = %s * %s", f, f, ub.args.Add(value)) +} + +// Div represents SET "field = field / value" in UPDATE. +func (ub *UpdateBuilder) Div(field string, value interface{}) string { + f := Escape(field) + return fmt.Sprintf("%s = %s / %s", f, f, ub.args.Add(value)) +} + +// OrderBy sets columns of ORDER BY in UPDATE. +func (ub *UpdateBuilder) OrderBy(col ...string) *UpdateBuilder { + ub.orderByCols = col + ub.marker = updateMarkerAfterOrderBy + return ub +} + +// Asc sets order of ORDER BY to ASC. +func (ub *UpdateBuilder) Asc() *UpdateBuilder { + ub.order = "ASC" + ub.marker = updateMarkerAfterOrderBy + return ub +} + +// Desc sets order of ORDER BY to DESC. +func (ub *UpdateBuilder) Desc() *UpdateBuilder { + ub.order = "DESC" + ub.marker = updateMarkerAfterOrderBy + return ub +} + +// Limit sets the LIMIT in UPDATE. +func (ub *UpdateBuilder) Limit(limit int) *UpdateBuilder { + if limit < 0 { + ub.limitVar = "" + return ub + } + + ub.limitVar = ub.Var(limit) + ub.marker = updateMarkerAfterLimit + return ub +} + +// Returning sets returning columns. +// For DBMS that doesn't support RETURNING, e.g. MySQL, it will be ignored. +func (ub *UpdateBuilder) Returning(col ...string) *UpdateBuilder { + ub.returning = col + ub.marker = updateMarkerAfterReturning + return ub +} + +// NumAssignment returns the number of assignments to update. +func (ub *UpdateBuilder) NumAssignment() int { + return len(ub.assignments) +} + +// String returns the compiled UPDATE string. +func (ub *UpdateBuilder) String() string { + s, _ := ub.Build() + return s +} + +// Build returns compiled UPDATE string and args. +// They can be used in `DB#Query` of package `database/sql` directly. +func (ub *UpdateBuilder) Build() (sql string, args []interface{}) { + return ub.BuildWithFlavor(ub.args.Flavor) +} + +// BuildWithFlavor returns compiled UPDATE string and args with flavor and initial args. +// They can be used in `DB#Query` of package `database/sql` directly. +func (ub *UpdateBuilder) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + buf := newStringBuilder() + ub.injection.WriteTo(buf, updateMarkerInit) + + if ub.cteBuilder != nil { + buf.WriteLeadingString(ub.cteBuilderVar) + ub.injection.WriteTo(buf, updateMarkerAfterWith) + } + + switch flavor { + case MySQL: + // CTE table names should be written after UPDATE keyword in MySQL. + tableNames := ub.TableNames() + + if len(tableNames) > 0 { + buf.WriteLeadingString("UPDATE ") + buf.WriteStrings(tableNames, ", ") + } + + default: + if len(ub.tables) > 0 { + buf.WriteLeadingString("UPDATE ") + buf.WriteStrings(ub.tables, ", ") + } + } + + ub.injection.WriteTo(buf, updateMarkerAfterUpdate) + + if assignments := filterEmptyStrings(ub.assignments); len(assignments) > 0 { + buf.WriteLeadingString("SET ") + buf.WriteStrings(assignments, ", ") + } + + ub.injection.WriteTo(buf, updateMarkerAfterSet) + + if flavor != MySQL { + // For ISO SQL, CTE table names should be written after FROM keyword. 
+ if ub.cteBuilder != nil { + cteTableNames := ub.cteBuilder.tableNamesForFrom() + + if len(cteTableNames) > 0 { + buf.WriteLeadingString("FROM ") + buf.WriteStrings(cteTableNames, ", ") + } + } + } + + if ub.WhereClause != nil { + ub.whereClauseProxy.WhereClause = ub.WhereClause + defer func() { + ub.whereClauseProxy.WhereClause = nil + }() + + buf.WriteLeadingString(ub.whereClauseExpr) + ub.injection.WriteTo(buf, updateMarkerAfterWhere) + } + + if len(ub.orderByCols) > 0 { + buf.WriteLeadingString("ORDER BY ") + buf.WriteStrings(ub.orderByCols, ", ") + + if ub.order != "" { + buf.WriteLeadingString(ub.order) + } + + ub.injection.WriteTo(buf, updateMarkerAfterOrderBy) + } + + if len(ub.limitVar) > 0 { + buf.WriteLeadingString("LIMIT ") + buf.WriteString(ub.limitVar) + + ub.injection.WriteTo(buf, updateMarkerAfterLimit) + } + + if flavor == PostgreSQL || flavor == SQLite { + if len(ub.returning) > 0 { + buf.WriteLeadingString("RETURNING ") + buf.WriteStrings(ub.returning, ", ") + } + + ub.injection.WriteTo(buf, updateMarkerAfterReturning) + } + + return ub.args.CompileWithFlavor(buf.String(), flavor, initialArg...) +} + +// SetFlavor sets the flavor of compiled sql. +func (ub *UpdateBuilder) SetFlavor(flavor Flavor) (old Flavor) { + old = ub.args.Flavor + ub.args.Flavor = flavor + return +} + +// Flavor returns flavor of builder +func (ub *UpdateBuilder) Flavor() Flavor { + return ub.args.Flavor +} + +// SQL adds an arbitrary sql to current position. +func (ub *UpdateBuilder) SQL(sql string) *UpdateBuilder { + ub.injection.SQL(ub.marker, sql) + return ub +} diff --git a/vendor/github.com/huandu/go-sqlbuilder/whereclause.go b/vendor/github.com/huandu/go-sqlbuilder/whereclause.go new file mode 100644 index 000000000..b88c3a4b4 --- /dev/null +++ b/vendor/github.com/huandu/go-sqlbuilder/whereclause.go @@ -0,0 +1,140 @@ +// Copyright 2018 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package sqlbuilder + +// WhereClause is a Builder for WHERE clause. +// All builders which support `WHERE` clause have an anonymous `WhereClause` field, +// in which the conditions are stored. +// +// WhereClause can be shared among multiple builders. +// However, it is not thread-safe. +type WhereClause struct { + flavor Flavor + clauses []clause +} + +var _ Builder = new(WhereClause) + +// NewWhereClause creates a new WhereClause. +func NewWhereClause() *WhereClause { + return &WhereClause{} +} + +// CopyWhereClause creates a copy of the whereClause. +func CopyWhereClause(whereClause *WhereClause) *WhereClause { + clauses := make([]clause, len(whereClause.clauses)) + copy(clauses, whereClause.clauses) + + return &WhereClause{ + flavor: whereClause.flavor, + clauses: clauses, + } +} + +type clause struct { + args *Args + andExprs []string +} + +func (c *clause) Build(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + exprs := filterEmptyStrings(c.andExprs) + + if len(exprs) == 0 { + return + } + + buf := newStringBuilder() + buf.WriteStrings(exprs, " AND ") + sql, args = c.args.CompileWithFlavor(buf.String(), flavor, initialArg...) + return +} + +// whereClauseProxy is a proxy for WhereClause. +// It's useful when the WhereClause in a build can be changed. +type whereClauseProxy struct { + *WhereClause +} + +var _ Builder = new(whereClauseProxy) + +// BuildWithFlavor builds a WHERE clause with the specified flavor and initial arguments. 
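+//
+// A hedged standalone sketch (NewCond and its exported Args field are assumed
+// to be available in this package version):
+//
+//	cond := NewCond()
+//	wc := NewWhereClause()
+//	wc.AddWhereExpr(cond.Args, cond.Equal("status", 1))
+//	sql, args := wc.BuildWithFlavor(MySQL)
+//	// sql: WHERE status = ?   args: []interface{}{1}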
+func (wc *WhereClause) BuildWithFlavor(flavor Flavor, initialArg ...interface{}) (sql string, args []interface{}) { + if wc == nil || len(wc.clauses) == 0 { + return "", nil + } + + buf := newStringBuilder() + buf.WriteLeadingString("WHERE ") + + sql, args = wc.clauses[0].Build(flavor, initialArg...) + buf.WriteString(sql) + + for _, clause := range wc.clauses[1:] { + buf.WriteString(" AND ") + sql, args = clause.Build(flavor, args...) + buf.WriteString(sql) + } + + return buf.String(), args +} + +// Build returns compiled WHERE clause string and args. +func (wc *WhereClause) Build() (sql string, args []interface{}) { + return wc.BuildWithFlavor(wc.flavor) +} + +// SetFlavor sets the flavor of compiled sql. +// When the WhereClause belongs to a builder, the flavor of the builder will be used when building SQL. +func (wc *WhereClause) SetFlavor(flavor Flavor) (old Flavor) { + old = wc.flavor + wc.flavor = flavor + return +} + +// Flavor returns flavor of clause +func (wc *WhereClause) Flavor() Flavor { + return wc.flavor +} + +// AddWhereExpr adds an AND expression to WHERE clause with the specified arguments. +func (wc *WhereClause) AddWhereExpr(args *Args, andExpr ...string) *WhereClause { + if len(andExpr) == 0 { + return wc + } + + andExprsBytesLen := estimateStringsBytes(andExpr) + + if andExprsBytesLen == 0 { + return wc + } + + // Merge with last clause if possible. + if len(wc.clauses) > 0 { + lastClause := &wc.clauses[len(wc.clauses)-1] + + if lastClause.args == args { + lastClause.andExprs = append(lastClause.andExprs, andExpr...) + return wc + } + } + + wc.clauses = append(wc.clauses, clause{ + args: args, + andExprs: andExpr, + }) + return wc +} + +// AddWhereClause adds all clauses in the whereClause to the wc. +func (wc *WhereClause) AddWhereClause(whereClause *WhereClause) *WhereClause { + if wc == nil { + return nil + } + if whereClause == nil { + return wc + } + + wc.clauses = append(wc.clauses, whereClause.clauses...) + return wc +} diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/huandu/xstrings/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md new file mode 100644 index 000000000..d7b4b8d58 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing # + +Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. + +## New API or feature ## + +I want to speak more about how to add new functions to this package. + +Package `xstring` is a collection of useful string functions which should be implemented in Go. It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible. + +* Rule 1: Only string algorithm, which takes string as input, can be included. 
+* Rule 2: If a function has been implemented in package `strings`, it must not be included.
+* Rule 3: If a function is not language neutral, it must not be included.
+* Rule 4: If a function is part of the standard library in other languages, it can be included.
+* Rule 5: If a function is quite useful in some famous framework or library, it can be included.
+
+New functions must be discussed in project issues before any code is submitted. If a pull request adds new functions without a referenced issue, it will be rejected.
+
+## Pull request ##
+
+Pull requests are always welcome. Just make sure you have run `go fmt` and that all test cases pass before submitting.
+
+If the pull request adds a new API or feature, don't forget to update README.md and add the new API to the function list.
diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE
new file mode 100644
index 000000000..270177259
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Huan Du
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md
new file mode 100644
index 000000000..750c3c7eb
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/README.md
@@ -0,0 +1,117 @@
+# xstrings
+
+[![Build Status](https://github.com/huandu/xstrings/workflows/Go/badge.svg)](https://github.com/huandu/xstrings/actions)
+[![Go Doc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://pkg.go.dev/github.com/huandu/xstrings)
+[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings)
+[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master)
+
+Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings).
+
+All functions are well tested and carefully tuned for performance.
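+
+A short, assumed example of typical usage (see the function list below for the full API):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/huandu/xstrings"
+)
+
+func main() {
+	fmt.Println(xstrings.ToSnakeCase("HTTPServer")) // http_server
+	fmt.Println(xstrings.Reverse("hello"))          // olleh
+	fmt.Println(xstrings.Center("hi", 6, "*"))      // **hi**
+}
+```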
+ +## Propose a new function + +Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. + +## Install + +Use `go get` to install this library. + + go get github.com/huandu/xstrings + +## API document + +See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. + +## Function list + +Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. + +Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. + +### Package `xstrings` functions + +_Keep this table sorted by Function in ascending order._ + +| Function | Friends | # | +| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------------- | +| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | +| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | +| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | +| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | +| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | +| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | +| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | +| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | +| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | +| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | 
`String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | +| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | +| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | +| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | +| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | +| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | +| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | +| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | +| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | +| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | +| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | +| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | +| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | + +### Package `strings` functions + +_Keep this table sorted by Function in ascending order._ + +| Function | Friends | +| --------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | +| 
[ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | +| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | +| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | +| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | +| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | +| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | +| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | +| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | +| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | +| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | +| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | +| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | +| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | +| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | +| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | +| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | +| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | +| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | +| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | +| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | +| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | +| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | +| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | +| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | +| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | +| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | +| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | +| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | +| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | +| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | +| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | +| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | +| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | +| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | +| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | +| 
[TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | + +## License + +This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go new file mode 100644 index 000000000..f427cc84e --- /dev/null +++ b/vendor/github.com/huandu/xstrings/common.go @@ -0,0 +1,21 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +const bufferMaxInitGrowSize = 2048 + +// Lazy initialize a buffer. +func allocBuffer(orig, cur string) *stringBuilder { + output := &stringBuilder{} + maxSize := len(orig) * 4 + + // Avoid to reserve too much memory at once. + if maxSize > bufferMaxInitGrowSize { + maxSize = bufferMaxInitGrowSize + } + + output.Grow(maxSize) + output.WriteString(orig[:len(orig)-len(cur)]) + return output +} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go new file mode 100644 index 000000000..cba0d0725 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -0,0 +1,593 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "math/rand" + "unicode" + "unicode/utf8" +) + +// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case. +// +// Some samples. +// +// "some_words" => "SomeWords" +// "http_server" => "HttpServer" +// "no_https" => "NoHttps" +// "_complex__case_" => "_Complex_Case_" +// "some words" => "SomeWords" +func ToCamelCase(str string) string { + if len(str) == 0 { + return "" + } + + buf := &stringBuilder{} + var r0, r1 rune + var size int + + // leading connector will appear in output. + for len(str) > 0 { + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if !isConnector(r0) { + r0 = unicode.ToUpper(r0) + break + } + + buf.WriteRune(r0) + } + + if len(str) == 0 { + // A special case for a string contains only 1 rune. + if size != 0 { + buf.WriteRune(r0) + } + + return buf.String() + } + + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if isConnector(r0) && isConnector(r1) { + buf.WriteRune(r1) + continue + } + + if isConnector(r1) { + r0 = unicode.ToUpper(r0) + } else { + buf.WriteRune(r1) + } + } + + buf.WriteRune(r0) + return buf.String() +} + +// ToSnakeCase can convert all upper case characters in a string to +// snake case format. +// +// Some samples. +// +// "FirstName" => "first_name" +// "HTTPServer" => "http_server" +// "NoHTTPS" => "no_https" +// "GO_PATH" => "go_path" +// "GO PATH" => "go_path" // space is converted to underscore. +// "GO-PATH" => "go_path" // hyphen is converted to underscore. +// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet. +// "HTTP20xOK" => "http_20x_ok" +// "Duration2m3s" => "duration_2m3s" +// "Bld4Floor3rd" => "bld4_floor_3rd" +func ToSnakeCase(str string) string { + return camelCaseToLowerCase(str, '_') +} + +// ToKebabCase can convert all upper case characters in a string to +// kebab case format. +// +// Some samples. 
+// +// "FirstName" => "first-name" +// "HTTPServer" => "http-server" +// "NoHTTPS" => "no-https" +// "GO_PATH" => "go-path" +// "GO PATH" => "go-path" // space is converted to '-'. +// "GO-PATH" => "go-path" // hyphen is converted to '-'. +// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet. +// "HTTP20xOK" => "http-20x-ok" +// "Duration2m3s" => "duration-2m3s" +// "Bld4Floor3rd" => "bld4-floor-3rd" +func ToKebabCase(str string) string { + return camelCaseToLowerCase(str, '-') +} + +func camelCaseToLowerCase(str string, connector rune) string { + if len(str) == 0 { + return "" + } + + buf := &stringBuilder{} + wt, word, remaining := nextWord(str) + + for len(remaining) > 0 { + if wt != connectorWord { + toLower(buf, wt, word, connector) + } + + prev := wt + last := word + wt, word, remaining = nextWord(remaining) + + switch prev { + case numberWord: + for wt == alphabetWord || wt == numberWord { + toLower(buf, wt, word, connector) + wt, word, remaining = nextWord(remaining) + } + + if wt != invalidWord && wt != punctWord && wt != connectorWord { + buf.WriteRune(connector) + } + + case connectorWord: + toLower(buf, prev, last, connector) + + case punctWord: + // nothing. + + default: + if wt != numberWord { + if wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + + break + } + + if len(remaining) == 0 { + break + } + + last := word + wt, word, remaining = nextWord(remaining) + + // consider number as a part of previous word. + // e.g. "Bld4Floor" => "bld4_floor" + if wt != alphabetWord { + toLower(buf, numberWord, last, connector) + + if wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + + break + } + + // if there are some lower case letters following a number, + // add connector before the number. + // e.g. 
"HTTP2xx" => "http_2xx" + buf.WriteRune(connector) + toLower(buf, numberWord, last, connector) + + for wt == alphabetWord || wt == numberWord { + toLower(buf, wt, word, connector) + wt, word, remaining = nextWord(remaining) + } + + if wt != invalidWord && wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + } + } + + toLower(buf, wt, word, connector) + return buf.String() +} + +func isConnector(r rune) bool { + return r == '-' || r == '_' || unicode.IsSpace(r) +} + +type wordType int + +const ( + invalidWord wordType = iota + numberWord + upperCaseWord + alphabetWord + connectorWord + punctWord + otherWord +) + +func nextWord(str string) (wt wordType, word, remaining string) { + if len(str) == 0 { + return + } + + var offset int + remaining = str + r, size := nextValidRune(remaining, utf8.RuneError) + offset += size + + if r == utf8.RuneError { + wt = invalidWord + word = str[:offset] + remaining = str[offset:] + return + } + + switch { + case isConnector(r): + wt = connectorWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isConnector(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsPunct(r): + wt = punctWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsPunct(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsUpper(r): + wt = upperCaseWord + remaining = remaining[size:] + + if len(remaining) == 0 { + break + } + + r, size = nextValidRune(remaining, r) + + switch { + case unicode.IsUpper(r): + prevSize := size + offset += size + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsUpper(r) { + break + } + + prevSize = size + offset += size + remaining = remaining[size:] + } + + // it's a bit complex when dealing with a case like "HTTPStatus". + // it's expected to be splitted into "HTTP" and "Status". + // Therefore "S" should be in remaining instead of word. 
+ if len(remaining) > 0 && isAlphabet(r) { + offset -= prevSize + remaining = str[offset:] + } + + case isAlphabet(r): + offset += size + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + case isAlphabet(r): + wt = alphabetWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsNumber(r): + wt = numberWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsNumber(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + default: + wt = otherWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + word = str[:offset] + return +} + +func nextValidRune(str string, prev rune) (r rune, size int) { + var sz int + + for len(str) > 0 { + r, sz = utf8.DecodeRuneInString(str) + size += sz + + if r != utf8.RuneError { + return + } + + str = str[sz:] + } + + r = prev + return +} + +func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { + buf.Grow(buf.Len() + len(str)) + + if wt != upperCaseWord && wt != connectorWord { + buf.WriteString(str) + return + } + + for len(str) > 0 { + r, size := utf8.DecodeRuneInString(str) + str = str[size:] + + if isConnector(r) { + buf.WriteRune(connector) + } else if unicode.IsUpper(r) { + buf.WriteRune(unicode.ToLower(r)) + } else { + buf.WriteRune(r) + } + } +} + +// SwapCase will swap characters case from upper to lower or lower to upper. +func SwapCase(str string) string { + var r rune + var size int + + buf := &stringBuilder{} + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case unicode.IsUpper(r): + buf.WriteRune(unicode.ToLower(r)) + + case unicode.IsLower(r): + buf.WriteRune(unicode.ToUpper(r)) + + default: + buf.WriteRune(r) + } + + str = str[size:] + } + + return buf.String() +} + +// FirstRuneToUpper converts first rune to upper case if necessary. +func FirstRuneToUpper(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsLower(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToUpper(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// FirstRuneToLower converts first rune to lower case if necessary. +func FirstRuneToLower(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsUpper(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToLower(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// Shuffle randomizes runes in a string and returns the result. +// It uses default random source in `math/rand`. +func Shuffle(str string) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + + for i := len(runes) - 1; i > 0; i-- { + index = rand.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// ShuffleSource randomizes runes in a string with given random source. 
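+//
+// A small assumed usage sketch:
+//
+//	src := rand.NewSource(42)
+//	s := ShuffleSource("hello", src) // a deterministic permutation of "hello"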
+func ShuffleSource(str string, src rand.Source) string {
+	if str == "" {
+		return str
+	}
+
+	runes := []rune(str)
+	index := 0
+	r := rand.New(src)
+
+	for i := len(runes) - 1; i > 0; i-- {
+		index = r.Intn(i + 1)
+
+		if i != index {
+			runes[i], runes[index] = runes[index], runes[i]
+		}
+	}
+
+	return string(runes)
+}
+
+// Successor returns the successor of str.
+//
+// If an alphanumeric rune is found in str, that rune is increased by 1.
+// If the increment generates a "carry", the rune to the left of it is incremented.
+// This process repeats until there is no carry, adding an additional rune if necessary.
+//
+// If there is no alphanumeric rune, the rightmost rune will be increased by 1
+// regardless of whether the result is a valid rune.
+//
+// Only the following characters are considered alphanumeric.
+//   - a - z
+//   - A - Z
+//   - 0 - 9
+//
+// Samples (borrowed from ruby's String#succ document):
+//
+//	"abcd"      => "abce"
+//	"THX1138"   => "THX1139"
+//	"<<koala>>" => "<<koalb>>"
+//	"1999zzz"   => "2000aaa"
+//	"ZZZ9999"   => "AAAA0000"
+//	"***"       => "**+"
+func Successor(str string) string {
+	if str == "" {
+		return str
+	}
+
+	var r rune
+	var i int
+	carry := ' '
+	runes := []rune(str)
+	l := len(runes)
+	lastAlphanumeric := l
+
+	for i = l - 1; i >= 0; i-- {
+		r = runes[i]
+
+		if ('a' <= r && r <= 'y') ||
+			('A' <= r && r <= 'Y') ||
+			('0' <= r && r <= '8') {
+			runes[i]++
+			carry = ' '
+			lastAlphanumeric = i
+			break
+		}
+
+		switch r {
+		case 'z':
+			runes[i] = 'a'
+			carry = 'a'
+			lastAlphanumeric = i
+
+		case 'Z':
+			runes[i] = 'A'
+			carry = 'A'
+			lastAlphanumeric = i
+
+		case '9':
+			runes[i] = '0'
+			carry = '0'
+			lastAlphanumeric = i
+		}
+	}
+
+	// Need to add one character for the carry.
+	if i < 0 && carry != ' ' {
+		buf := &stringBuilder{}
+		buf.Grow(l + 4) // Reserve enough space for the write.
+
+		if lastAlphanumeric != 0 {
+			buf.WriteString(str[:lastAlphanumeric])
+		}
+
+		buf.WriteRune(carry)
+
+		for _, r = range runes[lastAlphanumeric:] {
+			buf.WriteRune(r)
+		}
+
+		return buf.String()
+	}
+
+	// No alphanumeric character. Simply increase last rune's value.
+	if lastAlphanumeric == l {
+		runes[l-1]++
+	}
+
+	return string(runes)
+}
diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go
new file mode 100644
index 000000000..f96e38703
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/count.go
@@ -0,0 +1,120 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+// Len returns str's utf8 rune length.
+func Len(str string) int {
+	return utf8.RuneCountInString(str)
+}
+
+// WordCount returns the number of words in a string.
+//
+// A word is defined as a locale dependent string of alphabetic characters,
+// which may also contain, but not start with, `'` and `-` characters.
+func WordCount(str string) int {
+	var r rune
+	var size, n int
+
+	inWord := false
+
+	for len(str) > 0 {
+		r, size = utf8.DecodeRuneInString(str)
+
+		switch {
+		case isAlphabet(r):
+			if !inWord {
+				inWord = true
+				n++
+			}
+
+		case inWord && (r == '\'' || r == '-'):
+			// Still in word.
+
+		default:
+			inWord = false
+		}
+
+		str = str[size:]
+	}
+
+	return n
+}
+
+const minCJKCharacter = '\u3400'
+
+// isAlphabet checks whether r is a letter but not a CJK character.
+func isAlphabet(r rune) bool {
+	if !unicode.IsLetter(r) {
+		return false
+	}
+
+	switch {
+	// Quick check for non-CJK character.
+	case r < minCJKCharacter:
+		return true
+
+	// Common CJK characters.
+	case r >= '\u4E00' && r <= '\u9FCC':
+		return false
+
+	// Rare CJK characters.
+	case r >= '\u3400' && r <= '\u4D85':
+		return false
+
+	// Rare and historic CJK characters.
+	case r >= '\U00020000' && r <= '\U0002B81D':
+		return false
+	}
+
+	return true
+}
+
+// Width returns string width in monotype font.
+// Multi-byte characters are usually twice the width of single byte characters.
+//
+// Algorithm comes from `mb_strwidth` in PHP.
+// http://php.net/manual/en/function.mb-strwidth.php
+func Width(str string) int {
+	var r rune
+	var size, n int
+
+	for len(str) > 0 {
+		r, size = utf8.DecodeRuneInString(str)
+		n += RuneWidth(r)
+		str = str[size:]
+	}
+
+	return n
+}
+
+// RuneWidth returns character width in monotype font.
+// Multi-byte characters are usually twice the width of single byte characters.
+//
+// Algorithm comes from `mb_strwidth` in PHP.
+// http://php.net/manual/en/function.mb-strwidth.php
+func RuneWidth(r rune) int {
+	switch {
+	case r == utf8.RuneError || r < '\x20':
+		return 0
+
+	case '\x20' <= r && r < '\u2000':
+		return 1
+
+	case '\u2000' <= r && r < '\uFF61':
+		return 2
+
+	case '\uFF61' <= r && r < '\uFFA0':
+		return 1
+
+	case '\uFFA0' <= r:
+		return 2
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go
new file mode 100644
index 000000000..1a6ef069f
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+// Package xstrings provides string algorithms that are useful but not included in the standard `strings` package.
+// See project home page for details. https://github.com/huandu/xstrings
+//
+// Package xstrings assumes all strings are encoded in utf8.
+package xstrings
diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go
new file mode 100644
index 000000000..b32219bbd
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/format.go
@@ -0,0 +1,173 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+	"unicode/utf8"
+)
+
+// ExpandTabs expands each tab ('\t') rune in str to one or more spaces, depending on
+// the current column and tabSize.
+// The column number is reset to zero after each newline ('\n') occurring in str.
+//
+// ExpandTabs uses RuneWidth to decide each rune's width.
+// For example, CJK characters will be treated as two characters.
+//
+// If tabSize <= 0, ExpandTabs panics with an error.
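+//
+// (Assumed clarification: the current column advances by RuneWidth(r) for each
+// rune written, so wide CJK runes count as two columns, as the third sample
+// below shows.)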
+// +// Samples: +// +// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" +// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" +// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" +func ExpandTabs(str string, tabSize int) string { + if tabSize <= 0 { + panic("tab size must be positive") + } + + var r rune + var i, size, column, expand int + var output *stringBuilder + + orig := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == '\t' { + expand = tabSize - column%tabSize + + if output == nil { + output = allocBuffer(orig, str) + } + + for i = 0; i < expand; i++ { + output.WriteRune(' ') + } + + column += expand + } else { + if r == '\n' { + column = 0 + } else { + column += RuneWidth(r) + } + + if output != nil { + output.WriteRune(r) + } + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} + +// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// +// LeftJustify("hello", 4, " ") => "hello" +// LeftJustify("hello", 10, " ") => "hello " +// LeftJustify("hello", 10, "123") => "hello12312" +func LeftJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + output.WriteString(str) + writePadString(output, pad, padLen, remains) + return output.String() +} + +// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// +// RightJustify("hello", 4, " ") => "hello" +// RightJustify("hello", 10, " ") => " hello" +// RightJustify("hello", 10, "123") => "12312hello" +func RightJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains) + output.WriteString(str) + return output.String() +} + +// Center returns a string with pad string at both side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. 
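+//
+// (Assumed note: when the padding cannot be split evenly, the extra rune goes
+// to the right side, i.e. remains/2 runes on the left and (remains+1)/2 on the
+// right.)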
+// +// Samples: +// +// Center("hello", 4, " ") => "hello" +// Center("hello", 10, " ") => " hello " +// Center("hello", 10, "123") => "12hello123" +func Center(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains/2) + output.WriteString(str) + writePadString(output, pad, padLen, (remains+1)/2) + return output.String() +} + +func writePadString(output *stringBuilder, pad string, padLen, remains int) { + var r rune + var size int + + repeats := remains / padLen + + for i := 0; i < repeats; i++ { + output.WriteString(pad) + } + + remains = remains % padLen + + if remains != 0 { + for i := 0; i < remains; i++ { + r, size = utf8.DecodeRuneInString(pad) + output.WriteRune(r) + pad = pad[size:] + } + } +} diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go new file mode 100644 index 000000000..ab42fe0fe --- /dev/null +++ b/vendor/github.com/huandu/xstrings/manipulate.go @@ -0,0 +1,220 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "strings" + "unicode/utf8" +) + +// Reverse a utf8 encoded string. +func Reverse(str string) string { + var size int + + tail := len(str) + buf := make([]byte, tail) + s := buf + + for len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + tail -= size + s = append(s[:tail], []byte(str[:size])...) + str = str[size:] + } + + return string(buf) +} + +// Slice a string by rune. +// +// Start must satisfy 0 <= start <= rune length. +// +// End can be positive, zero or negative. +// If end >= 0, start and end must satisfy start <= end <= rune length. +// If end < 0, it means slice to the end of string. +// +// Otherwise, Slice will panic as out of range. +func Slice(str string, start, end int) string { + var size, startPos, endPos int + + origin := str + + if start < 0 || end > len(str) || (end >= 0 && start > end) { + panic("out of range") + } + + if end >= 0 { + end -= start + } + + for start > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + start-- + startPos += size + str = str[size:] + } + + if end < 0 { + return origin[startPos:] + } + + endPos = startPos + + for end > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + end-- + endPos += size + str = str[size:] + } + + if len(str) == 0 && (start > 0 || end > 0) { + panic("out of range") + } + + return origin[startPos:endPos] +} + +// Partition splits a string by sep into three parts. +// The return value is a slice of strings with head, match and tail. +// +// If str contains sep, for example "hello" and "l", Partition returns +// +// "he", "l", "lo" +// +// If str doesn't contain sep, for example "hello" and "x", Partition returns +// +// "hello", "", "" +func Partition(str, sep string) (head, match, tail string) { + index := strings.Index(str, sep) + + if index == -1 { + head = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// LastPartition splits a string by last instance of sep into three parts. +// The return value is a slice of strings with head, match and tail. 
+// +// If str contains sep, for example "hello" and "l", LastPartition returns +// +// "hel", "l", "o" +// +// If str doesn't contain sep, for example "hello" and "x", LastPartition returns +// +// "", "", "hello" +func LastPartition(str, sep string) (head, match, tail string) { + index := strings.LastIndex(str, sep) + + if index == -1 { + tail = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// Insert src into dst at given rune index. +// Index is counted by runes instead of bytes. +// +// If index is out of range of dst, panic with out of range. +func Insert(dst, src string, index int) string { + return Slice(dst, 0, index) + src + Slice(dst, index, -1) +} + +// Scrub scrubs invalid utf8 bytes with repl string. +// Adjacent invalid bytes are replaced only once. +func Scrub(str, repl string) string { + var buf *stringBuilder + var r rune + var size, pos int + var hasError bool + + origin := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == utf8.RuneError { + if !hasError { + if buf == nil { + buf = &stringBuilder{} + } + + buf.WriteString(origin[:pos]) + hasError = true + } + } else if hasError { + hasError = false + buf.WriteString(repl) + + origin = origin[pos:] + pos = 0 + } + + pos += size + str = str[size:] + } + + if buf != nil { + buf.WriteString(origin) + return buf.String() + } + + // No invalid byte. + return origin +} + +// WordSplit splits a string into words. Returns a slice of words. +// If there is no word in a string, return nil. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordSplit(str string) []string { + var word string + var words []string + var r rune + var size, pos int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + word = str + pos = 0 + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. + + default: + if inWord { + inWord = false + words = append(words, word[:pos]) + } + } + + pos += size + str = str[size:] + } + + if inWord { + words = append(words, word[:pos]) + } + + return words +} diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go new file mode 100644 index 000000000..06812fea0 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder.go @@ -0,0 +1,8 @@ +//go:build go1.10 +// +build go1.10 + +package xstrings + +import "strings" + +type stringBuilder = strings.Builder diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go new file mode 100644 index 000000000..ccaa5aedd --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go @@ -0,0 +1,10 @@ +//go:build !go1.10 +// +build !go1.10 + +package xstrings + +import "bytes" + +type stringBuilder struct { + bytes.Buffer +} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go new file mode 100644 index 000000000..1fac6a00b --- /dev/null +++ b/vendor/github.com/huandu/xstrings/translate.go @@ -0,0 +1,552 @@ +// Copyright 2015 Huan Du. All rights reserved. 
+// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +type runeRangeMap struct { + FromLo rune // Lower bound of range map. + FromHi rune // An inclusive higher bound of range map. + ToLo rune + ToHi rune +} + +type runeDict struct { + Dict [unicode.MaxASCII + 1]rune +} + +type runeMap map[rune]rune + +// Translator can translate string with pre-compiled from and to patterns. +// If a from/to pattern pair needs to be used more than once, it's recommended +// to create a Translator and reuse it. +type Translator struct { + quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. + runeMap runeMap // Rune map for translation. + ranges []*runeRangeMap // Ranges of runes. + mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. + reverted bool // If to pattern is empty, all matched characters will be deleted. + hasPattern bool +} + +// NewTranslator creates new Translator through a from/to pattern pair. +func NewTranslator(from, to string) *Translator { + tr := &Translator{} + + if from == "" { + return tr + } + + reverted := from[0] == '^' + deletion := len(to) == 0 + + if reverted { + from = from[1:] + } + + var fromStart, fromEnd, fromRangeStep rune + var toStart, toEnd, toRangeStep rune + var fromRangeSize, toRangeSize rune + var singleRunes []rune + + // Update the to rune range. + updateRange := func() { + // No more rune to read in the to rune pattern. + if toEnd == utf8.RuneError { + return + } + + if toRangeStep == 0 { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) + return + } + + // Current range is not empty. Consume 1 rune from start. + if toStart != toEnd { + toStart += toRangeStep + return + } + + // No more rune. Repeat the last rune. + if to == "" { + toEnd = utf8.RuneError + return + } + + // Both start and end are used. Read two more runes from the to pattern. + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + + if deletion { + toStart = utf8.RuneError + toEnd = utf8.RuneError + } else { + // If from pattern is reverted, only the last rune in the to pattern will be used. + if reverted { + var size int + + for len(to) > 0 { + toStart, size = utf8.DecodeRuneInString(to) + to = to[size:] + } + + toEnd = utf8.RuneError + } else { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + } + + fromEnd = utf8.RuneError + + for len(from) > 0 { + from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) + + // fromStart is a single character. Just map it with a rune in the to pattern. + if fromRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + continue + } + + for toEnd != utf8.RuneError && fromStart != fromEnd { + // If mapped rune is a single character instead of a range, simply shift first + // rune in the range. + if toRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + fromStart += fromRangeStep + continue + } + + fromRangeSize = (fromEnd - fromStart) * fromRangeStep + toRangeSize = (toEnd - toStart) * toRangeStep + + // Not enough runes in the to pattern. Need to read more. 
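+			// Illustration (assumed patterns): for from "a-z" and to "A-C", the
+			// from range outruns the to range; per the Translate docs, runes left
+			// over once `to` is exhausted map to the last rune of `to`.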
+ if fromRangeSize > toRangeSize { + fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) + fromStart += fromRangeStep + updateRange() + + // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered + // as a single rune. + if fromStart == fromEnd { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + } + + continue + } + + fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) + updateRange() + break + } + + if fromStart == fromEnd { + fromEnd = utf8.RuneError + continue + } + + _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) + fromEnd = utf8.RuneError + } + + if fromEnd != utf8.RuneError { + tr.addRune(fromEnd, toStart, singleRunes) + } + + tr.reverted = reverted + tr.mappedRune = -1 + tr.hasPattern = true + + // Translate RuneError only if in deletion or reverted mode. + if deletion || reverted { + tr.mappedRune = toStart + } + + return tr +} + +func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { + if from <= unicode.MaxASCII { + if tr.quickDict == nil { + tr.quickDict = &runeDict{} + } + + tr.quickDict.Dict[from] = to + } else { + if tr.runeMap == nil { + tr.runeMap = make(runeMap) + } + + tr.runeMap[from] = to + } + + singleRunes = append(singleRunes, from) + return singleRunes +} + +func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { + var r rune + var rrm *runeRangeMap + + if fromLo < fromHi { + rrm = &runeRangeMap{ + FromLo: fromLo, + FromHi: fromHi, + ToLo: toLo, + ToHi: toHi, + } + } else { + rrm = &runeRangeMap{ + FromLo: fromHi, + FromHi: fromLo, + ToLo: toHi, + ToHi: toLo, + } + } + + // If there is any single rune conflicts with this rune range, clear single rune record. + for _, r = range singleRunes { + if rrm.FromLo <= r && r <= rrm.FromHi { + if r <= unicode.MaxASCII { + tr.quickDict.Dict[r] = 0 + } else { + delete(tr.runeMap, r) + } + } + } + + tr.ranges = append(tr.ranges, rrm) + return fromHi, toHi +} + +func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { + var r rune + var size int + + remaining = str + escaping := false + isRange := false + + for len(remaining) > 0 { + r, size = utf8.DecodeRuneInString(remaining) + remaining = remaining[size:] + + // Parse special characters. + if !escaping { + if r == '\\' { + escaping = true + continue + } + + if r == '-' { + // Ignore slash at beginning of string. + if last == utf8.RuneError { + continue + } + + start = last + isRange = true + continue + } + } + + escaping = false + + if last != utf8.RuneError { + // This is a range which start and end are the same. + // Considier it as a normal character. + if isRange && last == r { + isRange = false + continue + } + + start = last + end = r + + if isRange { + if start < end { + rangeStep = 1 + } else { + rangeStep = -1 + } + } + + return + } + + last = r + } + + start = last + end = utf8.RuneError + return +} + +// Translate str with a from/to pattern pair. +// +// See comment in Translate function for usage and samples. 
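+//
+// A minimal assumed sketch of reusing a compiled Translator:
+//
+//	tr := NewTranslator("a-z", "A-Z")
+//	tr.Translate("hello") // "HELLO"
+//	tr.Translate("world") // "WORLD"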
+func (tr *Translator) Translate(str string) string { + if !tr.hasPattern || str == "" { + return str + } + + var r rune + var size int + var needTr bool + + orig := str + + var output *stringBuilder + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + r, needTr = tr.TranslateRune(r) + + if needTr && output == nil { + output = allocBuffer(orig, str) + } + + if r != utf8.RuneError && output != nil { + output.WriteRune(r) + } + + str = str[size:] + } + + // No character is translated. + if output == nil { + return orig + } + + return output.String() +} + +// TranslateRune return translated rune and true if r matches the from pattern. +// If r doesn't match the pattern, original r is returned and translated is false. +func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { + switch { + case tr.quickDict != nil: + if r <= unicode.MaxASCII { + result = tr.quickDict.Dict[r] + + if result != 0 { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + } + + fallthrough + + case tr.runeMap != nil: + var ok bool + + if result, ok = tr.runeMap[r]; ok { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + + fallthrough + + default: + var rrm *runeRangeMap + ranges := tr.ranges + + for i := len(ranges) - 1; i >= 0; i-- { + rrm = ranges[i] + + if rrm.FromLo <= r && r <= rrm.FromHi { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + break + } + + if rrm.ToLo < rrm.ToHi { + result = rrm.ToLo + r - rrm.FromLo + } else if rrm.ToLo > rrm.ToHi { + // ToHi can be smaller than ToLo if range is from higher to lower. + result = rrm.ToLo - r + rrm.FromLo + } else { + result = rrm.ToLo + } + + break + } + } + } + + if tr.reverted { + if !translated { + result = tr.mappedRune + } + + translated = !translated + } + + if !translated { + result = r + } + + return +} + +// HasPattern returns true if Translator has one pattern at least. +func (tr *Translator) HasPattern() bool { + return tr.hasPattern +} + +// Translate str with the characters defined in from replaced by characters defined in to. +// +// From and to are patterns representing a set of characters. Pattern is defined as following. +// +// Special characters: +// +// 1. '-' means a range of runes, e.g. +// "a-z" means all characters from 'a' to 'z' inclusive; +// "z-a" means all characters from 'z' to 'a' inclusive. +// 2. '^' as first character means a set of all runes excepted listed, e.g. +// "^a-z" means all characters except 'a' to 'z' inclusive. +// 3. '\' escapes special characters. +// +// Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. +// +// Translate will try to find a 1:1 mapping from from to to. +// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. +// +// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. +// +// If the to pattern is an empty string, Translate works exactly the same as Delete. +// +// Samples: +// +// Translate("hello", "aeiou", "12345") => "h2ll4" +// Translate("hello", "a-z", "A-Z") => "HELLO" +// Translate("hello", "z-a", "a-z") => "svool" +// Translate("hello", "aeiou", "*") => "h*ll*" +// Translate("hello", "^l", "*") => "**ll*" +// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" +func Translate(str, from, to string) string { + tr := NewTranslator(from, to) + return tr.Translate(str) +} + +// Delete runes in str matching the pattern. 
+// Pattern is defined in Translate function.
+//
+// Samples:
+//
+//	Delete("hello", "aeiou") => "hll"
+//	Delete("hello", "a-k") => "llo"
+//	Delete("hello", "^a-k") => "he"
+func Delete(str, pattern string) string {
+	tr := NewTranslator(pattern, "")
+	return tr.Translate(str)
+}
+
+// Count how many runes in str match the pattern.
+// Pattern is defined in Translate function.
+//
+// Samples:
+//
+//	Count("hello", "aeiou") => 2
+//	Count("hello", "a-k") => 2
+//	Count("hello", "^a-k") => 3
+func Count(str, pattern string) int {
+	if pattern == "" || str == "" {
+		return 0
+	}
+
+	var r rune
+	var size int
+	var matched bool
+
+	tr := NewTranslator(pattern, "")
+	cnt := 0
+
+	for len(str) > 0 {
+		r, size = utf8.DecodeRuneInString(str)
+		str = str[size:]
+
+		if _, matched = tr.TranslateRune(r); matched {
+			cnt++
+		}
+	}
+
+	return cnt
+}
+
+// Squeeze deletes adjacent repeated runes in str.
+// If pattern is not empty, only runes matching the pattern will be squeezed.
+//
+// Samples:
+//
+//	Squeeze("hello", "") => "helo"
+//	Squeeze("hello", "m-z") => "hello"
+//	Squeeze("hello   world", " ") => "hello world"
+func Squeeze(str, pattern string) string {
+	var last, r rune
+	var size int
+	var skipSqueeze, matched bool
+	var tr *Translator
+	var output *stringBuilder
+
+	orig := str
+	last = -1
+
+	if len(pattern) > 0 {
+		tr = NewTranslator(pattern, "")
+	}
+
+	for len(str) > 0 {
+		r, size = utf8.DecodeRuneInString(str)
+
+		// Need to squeeze the str.
+		if last == r && !skipSqueeze {
+			if tr != nil {
+				if _, matched = tr.TranslateRune(r); !matched {
+					skipSqueeze = true
+				}
+			}
+
+			if output == nil {
+				output = allocBuffer(orig, str)
+			}
+
+			if skipSqueeze {
+				output.WriteRune(r)
+			}
+		} else {
+			if output != nil {
+				output.WriteRune(r)
+			}
+
+			last = r
+			skipSqueeze = false
+		}
+
+		str = str[size:]
+	}
+
+	if output == nil {
+		return orig
+	}
+
+	return output.String()
+}
diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore b/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore
new file mode 100644
index 000000000..aaadf736e
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore
@@ -0,0 +1,32 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Code coverage profiles and other test artifacts
+*.out
+coverage.*
+*.coverprofile
+profile.cov
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+go.work.sum
+
+# env file
+.env
+
+# Editor/IDE
+# .idea/
+# .vscode/
diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes b/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes
new file mode 100644
index 000000000..4dd588a00
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes
@@ -0,0 +1,5 @@
+Changes
+=======
+
+v1.0.0 18 Aug 2025
+* Initial release
\ No newline at end of file
diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE b/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE
new file mode 100644
index 000000000..1e1f5d199
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 lestrrat-go
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go b/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go
new file mode 100644
index 000000000..85650efa1
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go
@@ -0,0 +1,29 @@
+package dsigsecp256k1
+
+import (
+	"crypto"
+
+	"github.com/decred/dcrd/dcrec/secp256k1/v4"
+	"github.com/lestrrat-go/dsig"
+)
+
+const ECDSAWithSecp256k1AndSHA256 = "ECDSA_WITH_SECP256K1_AND_SHA256"
+
+// init adds secp256k1 support when the dsig_secp256k1 build tag is used.
+func init() {
+	// Register ES256K (secp256k1 + SHA256) support using the new API
+	err := dsig.RegisterAlgorithm(ECDSAWithSecp256k1AndSHA256, dsig.AlgorithmInfo{
+		Family: dsig.ECDSA,
+		Meta: dsig.ECDSAFamilyMeta{
+			Hash: crypto.SHA256,
+		},
+	})
+	if err != nil {
+		panic("failed to register secp256k1 algorithm: " + err.Error())
+	}
+}
+
+// Curve returns the secp256k1 curve.
+func Curve() *secp256k1.KoblitzCurve { + return secp256k1.S256() +} diff --git a/vendor/github.com/lestrrat-go/dsig/.gitignore b/vendor/github.com/lestrrat-go/dsig/.gitignore new file mode 100644 index 000000000..aaadf736e --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/.gitignore @@ -0,0 +1,32 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Code coverage profiles and other test artifacts +*.out +coverage.* +*.coverprofile +profile.cov + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum + +# env file +.env + +# Editor/IDE +# .idea/ +# .vscode/ diff --git a/vendor/github.com/lestrrat-go/dsig/Changes b/vendor/github.com/lestrrat-go/dsig/Changes new file mode 100644 index 000000000..bccce9761 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/Changes @@ -0,0 +1,5 @@ +Changes +======= + +v1.0.0 - 18 Aug 2025 +* Initial release \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/dsig/LICENSE b/vendor/github.com/lestrrat-go/dsig/LICENSE new file mode 100644 index 000000000..1e1f5d199 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 lestrrat-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
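The `init` above registers ES256K as a side effect of importing this package. Here is a minimal consumer-side sketch, assuming the vendored dcrd secp256k1 helpers (`GeneratePrivateKey`, `ToECDSA`) behave as upstream documents:

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1/v4"
	"github.com/lestrrat-go/dsig"
	dsigsecp256k1 "github.com/lestrrat-go/dsig-secp256k1"
)

func main() {
	// ToECDSA converts the secp256k1 key to *ecdsa.PrivateKey,
	// which is the key type dsig's ECDSA family expects.
	priv, err := secp256k1.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	ecdsaKey := priv.ToECDSA()

	payload := []byte("hello world")

	// Importing dsig-secp256k1 has already registered the algorithm name.
	sig, err := dsig.Sign(ecdsaKey, dsigsecp256k1.ECDSAWithSecp256k1AndSHA256, payload, nil)
	if err != nil {
		panic(err)
	}

	if err := dsig.Verify(&ecdsaKey.PublicKey, dsigsecp256k1.ECDSAWithSecp256k1AndSHA256, payload, sig); err != nil {
		panic(err)
	}
	fmt.Println("secp256k1 signature verified")
}
```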
diff --git a/vendor/github.com/lestrrat-go/dsig/README.md b/vendor/github.com/lestrrat-go/dsig/README.md new file mode 100644 index 000000000..37c194579 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/README.md @@ -0,0 +1,163 @@ +# github.com/lestrrat-go/dsig [![CI](https://github.com/lestrrat-go/dsig/actions/workflows/ci.yml/badge.svg)](https://github.com/lestrrat-go/dsig/actions/workflows/ci.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/dsig.svg)](https://pkg.go.dev/github.com/lestrrat-go/dsig) [![codecov.io](https://codecov.io/github/lestrrat-go/dsig/coverage.svg?branch=v1)](https://codecov.io/github/lestrrat-go/dsig?branch=v1) + +Go module providing low-level digital signature operations. + +While there are many standards for generating and verifying digital signatures, the core operations are virtually the same. This module implements the core functionality of digital signature generation / verifications in a framework agnostic way. + +# Features + +* RSA signatures (PKCS1v15 and PSS) +* ECDSA signatures (P-256, P-384, P-521) +* EdDSA signatures (Ed25519, Ed448) +* HMAC signatures (SHA-256, SHA-384, SHA-512) +* Support for crypto.Signer interface +* Allows for dynamic additions of algorithms in limited cases. + +# SYNOPSIS + + +```go +package examples_test + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "fmt" + + "github.com/lestrrat-go/dsig" +) + +func Example() { + payload := []byte("hello world") + + // RSA signing and verification + { + privKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + fmt.Printf("failed to generate RSA key: %s\n", err) + return + } + + // Sign with RSA-PSS SHA256 + signature, err := dsig.Sign(privKey, dsig.RSAPSSWithSHA256, payload, nil) + if err != nil { + fmt.Printf("failed to sign with RSA: %s\n", err) + return + } + + // Verify with RSA-PSS SHA256 + err = dsig.Verify(&privKey.PublicKey, dsig.RSAPSSWithSHA256, payload, signature) + if err != nil { + fmt.Printf("failed to verify RSA signature: %s\n", err) + return + } + } + + // ECDSA signing and verification + { + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + fmt.Printf("failed to generate ECDSA key: %s\n", err) + return + } + + // Sign with ECDSA P-256 SHA256 + signature, err := dsig.Sign(privKey, dsig.ECDSAWithP256AndSHA256, payload, nil) + if err != nil { + fmt.Printf("failed to sign with ECDSA: %s\n", err) + return + } + + // Verify with ECDSA P-256 SHA256 + err = dsig.Verify(&privKey.PublicKey, dsig.ECDSAWithP256AndSHA256, payload, signature) + if err != nil { + fmt.Printf("failed to verify ECDSA signature: %s\n", err) + return + } + } + + // EdDSA signing and verification + { + pubKey, privKey, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + fmt.Printf("failed to generate Ed25519 key: %s\n", err) + return + } + + // Sign with EdDSA + signature, err := dsig.Sign(privKey, dsig.EdDSA, payload, nil) + if err != nil { + fmt.Printf("failed to sign with EdDSA: %s\n", err) + return + } + + // Verify with EdDSA + err = dsig.Verify(pubKey, dsig.EdDSA, payload, signature) + if err != nil { + fmt.Printf("failed to verify EdDSA signature: %s\n", err) + return + } + } + + // HMAC signing and verification + { + key := []byte("secret-key") + + // Sign with HMAC SHA256 + signature, err := dsig.Sign(key, 
dsig.HMACWithSHA256, payload, nil) + if err != nil { + fmt.Printf("failed to sign with HMAC: %s\n", err) + return + } + + // Verify with HMAC SHA256 + err = dsig.Verify(key, dsig.HMACWithSHA256, payload, signature) + if err != nil { + fmt.Printf("failed to verify HMAC signature: %s\n", err) + return + } + } + // OUTPUT: +} +``` +source: [examples/dsig_readme_example_test.go](https://github.com/lestrrat-go/dsig/blob/v1/examples/dsig_readme_example_test.go) + + +# Supported Algorithms + +| Constant | Algorithm | Key Type | +|----------|-----------|----------| +| `HMACWithSHA256` | HMAC using SHA-256 | []byte | +| `HMACWithSHA384` | HMAC using SHA-384 | []byte | +| `HMACWithSHA512` | HMAC using SHA-512 | []byte | +| `RSAPKCS1v15WithSHA256` | RSA PKCS#1 v1.5 using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPKCS1v15WithSHA384` | RSA PKCS#1 v1.5 using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPKCS1v15WithSHA512` | RSA PKCS#1 v1.5 using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPSSWithSHA256` | RSA PSS using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPSSWithSHA384` | RSA PSS using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPSSWithSHA512` | RSA PSS using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey | +| `ECDSAWithP256AndSHA256` | ECDSA using P-256 and SHA-256 | *ecdsa.PrivateKey / *ecdsa.PublicKey | +| `ECDSAWithP384AndSHA384` | ECDSA using P-384 and SHA-384 | *ecdsa.PrivateKey / *ecdsa.PublicKey | +| `ECDSAWithP521AndSHA512` | ECDSA using P-521 and SHA-512 | *ecdsa.PrivateKey / *ecdsa.PublicKey | +| `EdDSA` | EdDSA using Ed25519 or Ed448 | ed25519.PrivateKey / ed25519.PublicKey | + +# Description + +This library provides low-level digital signature operations. It does minimal parameter validation for performance, uses strongly typed APIs, and has minimal dependencies. + +# Contributions + +## Issues + +For bug reports and feature requests, please include failing tests when possible. + +## Pull Requests + +Please include tests that exercise your changes. + +# Related Libraries + +* [github.com/lestrrat-go/jwx](https://github.com/lestrrat-go/jwx) - JOSE (JWA/JWE/JWK/JWS/JWT) implementation \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/dsig/algorithms.go b/vendor/github.com/lestrrat-go/dsig/algorithms.go new file mode 100644 index 000000000..0895c6476 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/algorithms.go @@ -0,0 +1,37 @@ +package dsig + +// This file defines verbose algorithm name constants that can be mapped to by +// different standards (RFC7518, FIDO, etc.) for interoperability. +// +// The algorithm names are intentionally verbose to avoid any ambiguity about +// the exact cryptographic operations being performed. 
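Other standards map their own identifiers onto these verbose names. The sketch below shows one plausible, purely illustrative mapping from RFC 7518 (JWS) algorithm names; the `joseToDsig` map is not part of the dsig API:

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/dsig"
)

// joseToDsig is an illustrative consumer-side mapping from RFC 7518
// JWS algorithm names to the verbose dsig constants.
var joseToDsig = map[string]string{
	"HS256": dsig.HMACWithSHA256,
	"HS384": dsig.HMACWithSHA384,
	"HS512": dsig.HMACWithSHA512,
	"RS256": dsig.RSAPKCS1v15WithSHA256,
	"PS256": dsig.RSAPSSWithSHA256,
	"ES256": dsig.ECDSAWithP256AndSHA256,
	"EdDSA": dsig.EdDSA,
}

func main() {
	fmt.Println(joseToDsig["PS256"]) // RSA_PSS_WITH_SHA256
}
```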
+ +const ( + // HMAC signature algorithms + // These use Hash-based Message Authentication Code with specified hash functions + HMACWithSHA256 = "HMAC_WITH_SHA256" + HMACWithSHA384 = "HMAC_WITH_SHA384" + HMACWithSHA512 = "HMAC_WITH_SHA512" + + // RSA signature algorithms with PKCS#1 v1.5 padding + // These use RSA signatures with PKCS#1 v1.5 padding and specified hash functions + RSAPKCS1v15WithSHA256 = "RSA_PKCS1v15_WITH_SHA256" + RSAPKCS1v15WithSHA384 = "RSA_PKCS1v15_WITH_SHA384" + RSAPKCS1v15WithSHA512 = "RSA_PKCS1v15_WITH_SHA512" + + // RSA signature algorithms with PSS padding + // These use RSA signatures with Probabilistic Signature Scheme (PSS) padding + RSAPSSWithSHA256 = "RSA_PSS_WITH_SHA256" + RSAPSSWithSHA384 = "RSA_PSS_WITH_SHA384" + RSAPSSWithSHA512 = "RSA_PSS_WITH_SHA512" + + // ECDSA signature algorithms + // These use Elliptic Curve Digital Signature Algorithm with specified curves and hash functions + ECDSAWithP256AndSHA256 = "ECDSA_WITH_P256_AND_SHA256" + ECDSAWithP384AndSHA384 = "ECDSA_WITH_P384_AND_SHA384" + ECDSAWithP521AndSHA512 = "ECDSA_WITH_P521_AND_SHA512" + + // EdDSA signature algorithms + // These use Edwards-curve Digital Signature Algorithm (supports Ed25519 and Ed448) + EdDSA = "EDDSA" +) \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/dsig/crypto_signer.go b/vendor/github.com/lestrrat-go/dsig/crypto_signer.go new file mode 100644 index 000000000..f81666708 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/crypto_signer.go @@ -0,0 +1,45 @@ +package dsig + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" +) + +// cryptosign is a low-level function that signs a payload using a crypto.Signer. +// If hash is crypto.Hash(0), the payload is signed directly without hashing. +// Otherwise, the payload is hashed using the specified hash function before signing. +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. +func cryptosign(signer crypto.Signer, payload []byte, hash crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) { + if rr == nil { + rr = rand.Reader + } + + var digest []byte + if hash == crypto.Hash(0) { + digest = payload + } else { + h := hash.New() + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload to hash: %w`, err) + } + digest = h.Sum(nil) + } + return signer.Sign(rr, digest, opts) +} + +// SignCryptoSigner generates a signature using a crypto.Signer interface. +// This function can be used for hardware security modules, smart cards, +// and other implementations of the crypto.Signer interface. +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. +// +// Returns the signature bytes or an error if signing fails. +func SignCryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) { + if signer == nil { + return nil, fmt.Errorf("dsig.SignCryptoSigner: signer is nil") + } + return cryptosign(signer, raw, h, opts, rr) +} diff --git a/vendor/github.com/lestrrat-go/dsig/dsig.go b/vendor/github.com/lestrrat-go/dsig/dsig.go new file mode 100644 index 000000000..de6cbdec4 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/dsig.go @@ -0,0 +1,224 @@ +// Package dsig provides digital signature operations for Go. 
+// It contains low-level signature generation and verification tools that
+// can be used by other signing libraries.
+//
+// The package follows these design principles:
+// 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid.
+// 2. All exported functions are strongly typed (i.e. they do not take `any` types unless they absolutely have to).
+// 3. Does not rely on other high-level packages (standalone, except for internal packages).
+package dsig
+
+import (
+	"crypto"
+	"crypto/sha256"
+	"crypto/sha512"
+	"fmt"
+	"hash"
+	"sync"
+)
+
+// Family represents the cryptographic algorithm family
+type Family int
+
+const (
+	InvalidFamily Family = iota
+	HMAC
+	RSA
+	ECDSA
+	EdDSAFamily
+	maxFamily
+)
+
+// String returns the string representation of the Family
+func (f Family) String() string {
+	switch f {
+	case HMAC:
+		return "HMAC"
+	case RSA:
+		return "RSA"
+	case ECDSA:
+		return "ECDSA"
+	case EdDSAFamily:
+		return "EdDSA"
+	default:
+		return "InvalidFamily"
+	}
+}
+
+// AlgorithmInfo contains metadata about a digital signature algorithm
+type AlgorithmInfo struct {
+	Family Family // The cryptographic family (HMAC, RSA, ECDSA, EdDSA)
+	Meta   any    // Family-specific metadata
+}
+
+// HMACFamilyMeta contains metadata specific to HMAC algorithms
+type HMACFamilyMeta struct {
+	HashFunc func() hash.Hash // Hash function constructor
+}
+
+// RSAFamilyMeta contains metadata specific to RSA algorithms
+type RSAFamilyMeta struct {
+	Hash crypto.Hash // Hash algorithm
+	PSS  bool        // Whether to use PSS padding (false = PKCS#1 v1.5)
+}
+
+// ECDSAFamilyMeta contains metadata specific to ECDSA algorithms
+type ECDSAFamilyMeta struct {
+	Hash crypto.Hash // Hash algorithm
+}
+
+// EdDSAFamilyMeta contains metadata specific to EdDSA algorithms.
+// Currently EdDSA doesn't need specific metadata, but this provides extensibility
+type EdDSAFamilyMeta struct {
+	// Reserved for future use
+}
+
+var algorithms = make(map[string]AlgorithmInfo)
+var muAlgorithms sync.RWMutex
+
+// RegisterAlgorithm registers a new digital signature algorithm with the specified family and metadata.
+//
+// info.Meta should contain extra metadata for some algorithms. Currently the HMAC, RSA,
+// and ECDSA families of algorithms need their respective metadata (HMACFamilyMeta,
+// RSAFamilyMeta, and ECDSAFamilyMeta). Metadata for other families is ignored.
+func RegisterAlgorithm(name string, info AlgorithmInfo) error {
+	muAlgorithms.Lock()
+	defer muAlgorithms.Unlock()
+
+	// Validate the metadata matches the family
+	switch info.Family {
+	case HMAC:
+		if _, ok := info.Meta.(HMACFamilyMeta); !ok {
+			return fmt.Errorf("invalid HMAC metadata for algorithm %s", name)
+		}
+	case RSA:
+		if _, ok := info.Meta.(RSAFamilyMeta); !ok {
+			return fmt.Errorf("invalid RSA metadata for algorithm %s", name)
+		}
+	case ECDSA:
+		if _, ok := info.Meta.(ECDSAFamilyMeta); !ok {
+			return fmt.Errorf("invalid ECDSA metadata for algorithm %s", name)
+		}
+	case EdDSAFamily:
+		// EdDSA metadata is optional for now
+	default:
+		return fmt.Errorf("unsupported algorithm family %s for algorithm %s", info.Family, name)
+	}
+
+	algorithms[name] = info
+	return nil
+}
+
+// GetAlgorithmInfo retrieves the algorithm information for a given algorithm name.
+// Returns the info and true if found, or the zero value and false if not found.
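To make the registration contract concrete, here is a small sketch that registers and uses a hypothetical `HMAC_WITH_SHA224` algorithm; the algorithm name is made up for illustration and is not predefined by dsig:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/lestrrat-go/dsig"
)

func main() {
	// The metadata struct must match the declared family,
	// otherwise RegisterAlgorithm returns an error.
	err := dsig.RegisterAlgorithm("HMAC_WITH_SHA224", dsig.AlgorithmInfo{
		Family: dsig.HMAC,
		Meta: dsig.HMACFamilyMeta{
			HashFunc: sha256.New224,
		},
	})
	if err != nil {
		panic(err)
	}

	sig, err := dsig.Sign([]byte("secret-key"), "HMAC_WITH_SHA224", []byte("payload"), nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d-byte tag\n", len(sig)) // 28-byte tag
}
```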
+func GetAlgorithmInfo(name string) (AlgorithmInfo, bool) { + muAlgorithms.RLock() + defer muAlgorithms.RUnlock() + + info, ok := algorithms[name] + return info, ok +} + +func init() { + // Register all standard algorithms with their metadata + toRegister := map[string]AlgorithmInfo{ + // HMAC algorithms + HMACWithSHA256: { + Family: HMAC, + Meta: HMACFamilyMeta{ + HashFunc: sha256.New, + }, + }, + HMACWithSHA384: { + Family: HMAC, + Meta: HMACFamilyMeta{ + HashFunc: sha512.New384, + }, + }, + HMACWithSHA512: { + Family: HMAC, + Meta: HMACFamilyMeta{ + HashFunc: sha512.New, + }, + }, + + // RSA PKCS#1 v1.5 algorithms + RSAPKCS1v15WithSHA256: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA256, + PSS: false, + }, + }, + RSAPKCS1v15WithSHA384: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA384, + PSS: false, + }, + }, + RSAPKCS1v15WithSHA512: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA512, + PSS: false, + }, + }, + + // RSA PSS algorithms + RSAPSSWithSHA256: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA256, + PSS: true, + }, + }, + RSAPSSWithSHA384: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA384, + PSS: true, + }, + }, + RSAPSSWithSHA512: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA512, + PSS: true, + }, + }, + + // ECDSA algorithms + ECDSAWithP256AndSHA256: { + Family: ECDSA, + Meta: ECDSAFamilyMeta{ + Hash: crypto.SHA256, + }, + }, + ECDSAWithP384AndSHA384: { + Family: ECDSA, + Meta: ECDSAFamilyMeta{ + Hash: crypto.SHA384, + }, + }, + ECDSAWithP521AndSHA512: { + Family: ECDSA, + Meta: ECDSAFamilyMeta{ + Hash: crypto.SHA512, + }, + }, + + // EdDSA algorithm + EdDSA: { + Family: EdDSAFamily, + Meta: EdDSAFamilyMeta{}, + }, + } + + for name, info := range toRegister { + if err := RegisterAlgorithm(name, info); err != nil { + panic(fmt.Sprintf("failed to register algorithm %s: %v", name, err)) + } + } +} + diff --git a/vendor/github.com/lestrrat-go/dsig/ecdsa.go b/vendor/github.com/lestrrat-go/dsig/ecdsa.go new file mode 100644 index 000000000..a04a26691 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/ecdsa.go @@ -0,0 +1,200 @@ +package dsig + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "encoding/asn1" + "fmt" + "io" + "math/big" + + "github.com/lestrrat-go/dsig/internal/ecutil" +) + + +func ecdsaGetSignerKey(key any) (*ecdsa.PrivateKey, crypto.Signer, bool, error) { + cs, isCryptoSigner := key.(crypto.Signer) + if isCryptoSigner { + if !isValidECDSAKey(key) { + return nil, nil, false, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key) + } + + switch key.(type) { + case ecdsa.PrivateKey, *ecdsa.PrivateKey: + // if it's ecdsa.PrivateKey, it's more efficient to + // go through the non-crypto.Signer route. Set isCryptoSigner to false + isCryptoSigner = false + } + } + + if isCryptoSigner { + return nil, cs, true, nil + } + + privkey, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, nil, false, fmt.Errorf(`invalid key type %T. *ecdsa.PrivateKey is required`, key) + } + return privkey, nil, false, nil +} + +// UnpackASN1ECDSASignature unpacks an ASN.1 encoded ECDSA signature into r and s values. +// This is typically used when working with crypto.Signer interfaces that return ASN.1 encoded signatures. +func UnpackASN1ECDSASignature(signed []byte, r, s *big.Int) error { + // Okay, this is silly, but hear me out. When we use the + // crypto.Signer interface, the PrivateKey is hidden. 
+	// But we need some information about the key (its bit size).
+	//
+	// So while silly, we're going to have to make another call
+	// here and fetch the Public key.
+	// (This probably means that this information should be cached somewhere)
+	var p struct {
+		R *big.Int // TODO: get this from a pool?
+		S *big.Int
+	}
+	if _, err := asn1.Unmarshal(signed, &p); err != nil {
+		return fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err)
+	}
+
+	r.Set(p.R)
+	s.Set(p.S)
+	return nil
+}
+
+// UnpackECDSASignature unpacks a JWS-format ECDSA signature into r and s values.
+// The signature should be in the format specified by RFC 7515 (r||s as fixed-length byte arrays).
+func UnpackECDSASignature(signature []byte, pubkey *ecdsa.PublicKey, r, s *big.Int) error {
+	keySize := ecutil.CalculateKeySize(pubkey.Curve)
+	if len(signature) != keySize*2 {
+		return fmt.Errorf(`invalid signature length for curve %q`, pubkey.Curve.Params().Name)
+	}
+
+	r.SetBytes(signature[:keySize])
+	s.SetBytes(signature[keySize:])
+
+	return nil
+}
+
+// PackECDSASignature packs the r and s values from an ECDSA signature into a JWS-format byte slice.
+// The output format follows RFC 7515: r||s as fixed-length byte arrays.
+func PackECDSASignature(r *big.Int, sbig *big.Int, curveBits int) ([]byte, error) {
+	keyBytes := curveBits / 8
+	if curveBits%8 > 0 {
+		keyBytes++
+	}
+
+	// Serialize r and s into fixed-length bytes
+	rBytes := r.Bytes()
+	rBytesPadded := make([]byte, keyBytes)
+	copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+	sBytes := sbig.Bytes()
+	sBytesPadded := make([]byte, keyBytes)
+	copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+	// Output as r||s
+	return append(rBytesPadded, sBytesPadded...), nil
+}
+
+// SignECDSA generates an ECDSA signature for the given payload using the specified private key and hash.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+func SignECDSA(key *ecdsa.PrivateKey, payload []byte, h crypto.Hash, rr io.Reader) ([]byte, error) {
+	if !isValidECDSAKey(key) {
+		return nil, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key)
+	}
+	hh := h.New()
+	if _, err := hh.Write(payload); err != nil {
+		return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err)
+	}
+	digest := hh.Sum(nil)
+
+	if rr == nil {
+		rr = rand.Reader
+	}
+
+	// Sign and get r, s values
+	r, s, err := ecdsa.Sign(rr, key, digest)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err)
+	}
+
+	return PackECDSASignature(r, s, key.Curve.Params().BitSize)
+}
+
+// SignECDSACryptoSigner generates an ECDSA signature using a crypto.Signer interface.
+// This function works with hardware security modules and other crypto.Signer implementations.
+// The signature is converted from ASN.1 format to JWS format (r||s).
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
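A short sketch of the fixed-width format that `PackECDSASignature` produces via `SignECDSA`, assuming a standard P-256 key:

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/lestrrat-go/dsig"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	payload := []byte("hello world")
	sig, err := dsig.SignECDSA(key, payload, crypto.SHA256, nil)
	if err != nil {
		panic(err)
	}

	// P-256 coordinates are 32 bytes each, so the JWS-format
	// signature (r||s) is always 64 bytes, unlike ASN.1 encoding.
	fmt.Println(len(sig)) // 64

	if err := dsig.VerifyECDSA(&key.PublicKey, payload, sig, crypto.SHA256); err != nil {
		panic(err)
	}
}
```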
+func SignECDSACryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, rr io.Reader) ([]byte, error) {
+	signed, err := SignCryptoSigner(signer, raw, h, h, rr)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to sign payload using crypto.Signer: %w`, err)
+	}
+
+	return signECDSACryptoSigner(signer, signed)
+}
+
+func signECDSACryptoSigner(signer crypto.Signer, signed []byte) ([]byte, error) {
+	cpub := signer.Public()
+	pubkey, ok := cpub.(*ecdsa.PublicKey)
+	if !ok {
+		return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, cpub)
+	}
+	curveBits := pubkey.Curve.Params().BitSize
+
+	var r, s big.Int
+	if err := UnpackASN1ECDSASignature(signed, &r, &s); err != nil {
+		return nil, fmt.Errorf(`failed to unpack ASN1 encoded signature: %w`, err)
+	}
+
+	return PackECDSASignature(&r, &s, curveBits)
+}
+
+func ecdsaVerify(key *ecdsa.PublicKey, buf []byte, h crypto.Hash, r, s *big.Int) error {
+	hasher := h.New()
+	hasher.Write(buf)
+	digest := hasher.Sum(nil)
+	if !ecdsa.Verify(key, digest, r, s) {
+		return NewVerificationError("invalid ECDSA signature")
+	}
+	return nil
+}
+
+// VerifyECDSA verifies an ECDSA signature for the given payload.
+// This function verifies the signature using the specified public key and hash algorithm.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+func VerifyECDSA(key *ecdsa.PublicKey, payload, signature []byte, h crypto.Hash) error {
+	var r, s big.Int
+	if err := UnpackECDSASignature(signature, key, &r, &s); err != nil {
+		return fmt.Errorf("dsig.VerifyECDSA: failed to unpack ECDSA signature: %w", err)
+	}
+
+	return ecdsaVerify(key, payload, h, &r, &s)
+}
+
+// VerifyECDSACryptoSigner verifies an ECDSA signature for crypto.Signer implementations.
+// This function is useful for verifying signatures created by hardware security modules
+// or other implementations of the crypto.Signer interface.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+func VerifyECDSACryptoSigner(signer crypto.Signer, payload, signature []byte, h crypto.Hash) error {
+	var pubkey *ecdsa.PublicKey
+	switch cpub := signer.Public(); cpub := cpub.(type) {
+	case ecdsa.PublicKey:
+		pubkey = &cpub
+	case *ecdsa.PublicKey:
+		pubkey = cpub
+	default:
+		return fmt.Errorf(`dsig.VerifyECDSACryptoSigner: expected *ecdsa.PublicKey, got %T`, cpub)
+	}
+
+	var r, s big.Int
+	if err := UnpackECDSASignature(signature, pubkey, &r, &s); err != nil {
+		return fmt.Errorf("dsig.VerifyECDSACryptoSigner: failed to unpack ECDSA signature: %w", err)
+	}
+
+	return ecdsaVerify(pubkey, payload, h, &r, &s)
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/eddsa.go b/vendor/github.com/lestrrat-go/dsig/eddsa.go
new file mode 100644
index 000000000..6562da37b
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/eddsa.go
@@ -0,0 +1,44 @@
+package dsig
+
+import (
+	"crypto"
+	"crypto/ed25519"
+	"fmt"
+)
+
+func eddsaGetSigner(key any) (crypto.Signer, error) {
+	// The ed25519.PrivateKey object implements crypto.Signer, so we should
+	// simply accept a crypto.Signer here.
+	signer, ok := key.(crypto.Signer)
+	if ok {
+		if !isValidEDDSAKey(key) {
+			return nil, fmt.Errorf(`invalid key type %T for EdDSA algorithm`, key)
+		}
+		return signer, nil
+	}
+
+	// This fallback exists for cases when users give us a pointer instead of non-pointer, etc.
+	privkey, ok := key.(ed25519.PrivateKey)
+	if !ok {
+		return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T`, key)
+	}
+	return privkey, nil
+}
+
+// SignEdDSA generates an EdDSA (Ed25519) signature for the given payload.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+// EdDSA is deterministic and doesn't require additional hashing of the input.
+func SignEdDSA(key ed25519.PrivateKey, payload []byte) ([]byte, error) {
+	return ed25519.Sign(key, payload), nil
+}
+
+// VerifyEdDSA verifies an EdDSA (Ed25519) signature for the given payload.
+// This function verifies the signature using the Ed25519 verification algorithm.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+// EdDSA is deterministic and provides strong security guarantees without requiring hash function selection.
+func VerifyEdDSA(key ed25519.PublicKey, payload, signature []byte) error {
+	if !ed25519.Verify(key, payload, signature) {
+		return fmt.Errorf("invalid EdDSA signature")
+	}
+	return nil
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/hmac.go b/vendor/github.com/lestrrat-go/dsig/hmac.go
new file mode 100644
index 000000000..8b2612279
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/hmac.go
@@ -0,0 +1,45 @@
+package dsig
+
+import (
+	"crypto/hmac"
+	"fmt"
+	"hash"
+)
+
+func toHMACKey(dst *[]byte, key any) error {
+	keyBytes, ok := key.([]byte)
+	if !ok {
+		return fmt.Errorf(`dsig.toHMACKey: invalid key type %T. []byte is required`, key)
+	}
+
+	if len(keyBytes) == 0 {
+		return fmt.Errorf(`dsig.toHMACKey: missing key while signing payload`)
+	}
+
+	*dst = keyBytes
+	return nil
+}
+
+// SignHMAC generates an HMAC signature for the given payload using the specified hash function and key.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+func SignHMAC(key, payload []byte, hfunc func() hash.Hash) ([]byte, error) {
+	h := hmac.New(hfunc, key)
+	if _, err := h.Write(payload); err != nil {
+		return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err)
+	}
+	return h.Sum(nil), nil
+}
+
+// VerifyHMAC verifies an HMAC signature for the given payload.
+// This function verifies the signature using the specified key and hash function.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
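A minimal round trip through `SignHMAC` and the `VerifyHMAC` function defined next; note that verification recomputes the MAC and compares with the constant-time `hmac.Equal`:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/lestrrat-go/dsig"
)

func main() {
	key := []byte("secret-key")
	payload := []byte("hello world")

	sig, err := dsig.SignHMAC(key, payload, sha256.New)
	if err != nil {
		panic(err)
	}

	// Verification recomputes the MAC over the same payload.
	if err := dsig.VerifyHMAC(key, payload, sig, sha256.New); err != nil {
		panic(err)
	}
	fmt.Println("HMAC verified")
}
```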
+func VerifyHMAC(key, payload, signature []byte, hfunc func() hash.Hash) error {
+	expected, err := SignHMAC(key, payload, hfunc)
+	if err != nil {
+		return fmt.Errorf("failed to sign payload for verification: %w", err)
+	}
+	if !hmac.Equal(signature, expected) {
+		return NewVerificationError("invalid HMAC signature")
+	}
+	return nil
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go b/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go
new file mode 100644
index 000000000..cf0bd4ac4
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go
@@ -0,0 +1,76 @@
+// Package ecutil defines tools that help with elliptic curve related
+// computation
+package ecutil
+
+import (
+	"crypto/elliptic"
+	"math/big"
+	"sync"
+)
+
+const (
+	// size of buffer that needs to be allocated for EC521 curve
+	ec521BufferSize = 66 // (521 / 8) + 1
+)
+
+var ecpointBufferPool = sync.Pool{
+	New: func() any {
+		// In most cases the curve bit size will be less than this length
+		// so allocate the maximum, and keep reusing
+		buf := make([]byte, 0, ec521BufferSize)
+		return &buf
+	},
+}
+
+func getCrvFixedBuffer(size int) []byte {
+	//nolint:forcetypeassert
+	buf := *(ecpointBufferPool.Get().(*[]byte))
+	if size > ec521BufferSize && cap(buf) < size {
+		buf = append(buf, make([]byte, size-cap(buf))...)
+	}
+	return buf[:size]
+}
+
+// ReleaseECPointBuffer releases the []byte buffer allocated.
+func ReleaseECPointBuffer(buf []byte) {
+	buf = buf[:cap(buf)]
+	buf[0] = 0x0
+	for i := 1; i < len(buf); i *= 2 {
+		copy(buf[i:], buf[:i])
+	}
+	buf = buf[:0]
+	ecpointBufferPool.Put(&buf)
+}
+
+func CalculateKeySize(crv elliptic.Curve) int {
+	// We need a buffer big enough to hold one curve coordinate:
+	// ceil(BitSize / 8) bytes, e.g. 66 bytes for the 521-bit curve.
+	bits := crv.Params().BitSize
+
+	// For the most common cases we know beforehand what the byte length
+	// is going to be, so optimize for them.
+	var inBytes int
+	switch bits {
+	case 224, 256, 384: // TODO: use constant?
+		inBytes = bits / 8
+	case 521:
+		inBytes = ec521BufferSize
+	default:
+		inBytes = bits / 8
+		if (bits % 8) != 0 {
+			inBytes++
+		}
+	}
+
+	return inBytes
+}
+
+// AllocECPointBuffer allocates a buffer for the given point in the given
+// curve. This buffer should be released using the ReleaseECPointBuffer
+// function.
+func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte {
+	buf := getCrvFixedBuffer(CalculateKeySize(crv))
+	v.FillBytes(buf)
+	return buf
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/rsa.go b/vendor/github.com/lestrrat-go/dsig/rsa.go
new file mode 100644
index 000000000..a339fe5b7
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/rsa.go
@@ -0,0 +1,63 @@
+package dsig
+
+import (
+	"crypto"
+	"crypto/rsa"
+	"fmt"
+	"io"
+)
+
+func rsaGetSignerCryptoSignerKey(key any) (crypto.Signer, bool, error) {
+	if !isValidRSAKey(key) {
+		return nil, false, fmt.Errorf(`invalid key type %T for RSA algorithm`, key)
+	}
+	cs, isCryptoSigner := key.(crypto.Signer)
+	if isCryptoSigner {
+		return cs, true, nil
+	}
+	return nil, false, nil
+}
+
+// rsaPSSOptions returns the PSS options for RSA-PSS signatures with the specified hash.
+// The salt length is set to equal the hash length as per RFC 7518.
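A sketch of the two RSA padding modes exposed by the `SignRSA`/`VerifyRSA` functions below; the `pss` flag selects RSA-PSS with the salt length tied to the hash size, as the helper's comment notes:

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/lestrrat-go/dsig"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	payload := []byte("hello world")

	// pss=true gives RSA-PSS (as in JWS PS256); pss=false gives PKCS#1 v1.5.
	sig, err := dsig.SignRSA(key, payload, crypto.SHA256, true, nil)
	if err != nil {
		panic(err)
	}

	if err := dsig.VerifyRSA(&key.PublicKey, payload, sig, crypto.SHA256, true); err != nil {
		panic(err)
	}
	fmt.Println("RSA-PSS signature verified")
}
```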
+func rsaPSSOptions(h crypto.Hash) rsa.PSSOptions {
+	return rsa.PSSOptions{
+		Hash:       h,
+		SaltLength: rsa.PSSSaltLengthEqualsHash,
+	}
+}
+
+// SignRSA generates an RSA signature for the given payload using the specified private key and options.
+// The payload parameter should be the pre-computed signing input (typically header.payload).
+// If pss is true, RSA-PSS is used; otherwise, PKCS#1 v1.5 is used.
+//
+// The rr parameter is an optional io.Reader that can be used to provide randomness for signing.
+// If rr is nil, it defaults to rand.Reader.
+func SignRSA(key *rsa.PrivateKey, payload []byte, h crypto.Hash, pss bool, rr io.Reader) ([]byte, error) {
+	if !isValidRSAKey(key) {
+		return nil, fmt.Errorf(`invalid key type %T for RSA algorithm`, key)
+	}
+	var opts crypto.SignerOpts = h
+	if pss {
+		rsaopts := rsaPSSOptions(h)
+		opts = &rsaopts
+	}
+	return cryptosign(key, payload, h, opts, rr)
+}
+
+// VerifyRSA verifies an RSA signature for the given payload.
+// The signature is verified using the specified public key and hash algorithm.
+// If pss is true, RSA-PSS verification is used; otherwise, PKCS#1 v1.5 verification is used.
+func VerifyRSA(key *rsa.PublicKey, payload, signature []byte, h crypto.Hash, pss bool) error {
+	if !isValidRSAKey(key) {
+		return fmt.Errorf(`invalid key type %T for RSA algorithm`, key)
+	}
+	hasher := h.New()
+	hasher.Write(payload)
+	digest := hasher.Sum(nil)
+	if pss {
+		return rsa.VerifyPSS(key, h, digest, signature, &rsa.PSSOptions{Hash: h, SaltLength: rsa.PSSSaltLengthEqualsHash})
+	}
+	return rsa.VerifyPKCS1v15(key, h, digest, signature)
+}
diff --git a/vendor/github.com/lestrrat-go/dsig/sign.go b/vendor/github.com/lestrrat-go/dsig/sign.go
new file mode 100644
index 000000000..e2a6bde29
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/dsig/sign.go
@@ -0,0 +1,100 @@
+package dsig
+
+import (
+	"crypto"
+	"crypto/rsa"
+	"fmt"
+	"io"
+)
+
+// Sign generates a digital signature using the specified key and algorithm.
+//
+// This function loads the signer registered in the dsig package _ONLY_.
+// It does not support custom signers that the user might have registered.
+//
+// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+// Not all algorithms require this parameter, but it is included for consistency.
+// 99% of the time, you can pass nil for rr, and it will work fine.
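A sketch of the family dispatch that `Sign` performs, here through the EdDSA path (which ignores the randomness reader, so passing nil is fine):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/lestrrat-go/dsig"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	payload := []byte("hello world")

	// Sign looks up the algorithm's family and dispatches accordingly;
	// ed25519.PrivateKey already satisfies crypto.Signer.
	sig, err := dsig.Sign(priv, dsig.EdDSA, payload, nil)
	if err != nil {
		panic(err)
	}

	if err := dsig.Verify(pub, dsig.EdDSA, payload, sig); err != nil {
		panic(err)
	}
	fmt.Println("EdDSA signature verified")
}
```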
+func Sign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) { + info, ok := GetAlgorithmInfo(alg) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: unsupported signature algorithm %q`, alg) + } + + switch info.Family { + case HMAC: + return dispatchHMACSign(key, info, payload) + case RSA: + return dispatchRSASign(key, info, payload, rr) + case ECDSA: + return dispatchECDSASign(key, info, payload, rr) + case EdDSAFamily: + return dispatchEdDSASign(key, info, payload, rr) + default: + return nil, fmt.Errorf(`dsig.Sign: unsupported signature family %q`, info.Family) + } +} + +func dispatchHMACSign(key any, info AlgorithmInfo, payload []byte) ([]byte, error) { + meta, ok := info.Meta.(HMACFamilyMeta) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: invalid HMAC metadata`) + } + + var hmackey []byte + if err := toHMACKey(&hmackey, key); err != nil { + return nil, fmt.Errorf(`dsig.Sign: %w`, err) + } + return SignHMAC(hmackey, payload, meta.HashFunc) +} + +func dispatchRSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) { + meta, ok := info.Meta.(RSAFamilyMeta) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: invalid RSA metadata`) + } + + cs, isCryptoSigner, err := rsaGetSignerCryptoSignerKey(key) + if err != nil { + return nil, fmt.Errorf(`dsig.Sign: %w`, err) + } + if isCryptoSigner { + var options crypto.SignerOpts = meta.Hash + if meta.PSS { + rsaopts := rsaPSSOptions(meta.Hash) + options = &rsaopts + } + return SignCryptoSigner(cs, payload, meta.Hash, options, rr) + } + + privkey, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: invalid key type %T. *rsa.PrivateKey is required`, key) + } + return SignRSA(privkey, payload, meta.Hash, meta.PSS, rr) +} + +func dispatchEdDSASign(key any, _ AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) { + signer, err := eddsaGetSigner(key) + if err != nil { + return nil, fmt.Errorf(`dsig.Sign: %w`, err) + } + + return SignCryptoSigner(signer, payload, crypto.Hash(0), crypto.Hash(0), rr) +} + +func dispatchECDSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) { + meta, ok := info.Meta.(ECDSAFamilyMeta) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: invalid ECDSA metadata`) + } + + privkey, cs, isCryptoSigner, err := ecdsaGetSignerKey(key) + if err != nil { + return nil, fmt.Errorf(`dsig.Sign: %w`, err) + } + if isCryptoSigner { + return SignECDSACryptoSigner(cs, payload, meta.Hash, rr) + } + return SignECDSA(privkey, payload, meta.Hash, rr) +} diff --git a/vendor/github.com/lestrrat-go/dsig/validation.go b/vendor/github.com/lestrrat-go/dsig/validation.go new file mode 100644 index 000000000..17682d853 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/validation.go @@ -0,0 +1,66 @@ +package dsig + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" +) + +// isValidRSAKey validates that the provided key type is appropriate for RSA algorithms. +// It returns false if the key is clearly incompatible (e.g., ECDSA or EdDSA keys). +func isValidRSAKey(key any) bool { + switch key.(type) { + case + ecdsa.PrivateKey, *ecdsa.PrivateKey, + ed25519.PrivateKey: + // these are NOT ok for RSA algorithms + return false + } + return true +} + +// isValidECDSAKey validates that the provided key type is appropriate for ECDSA algorithms. +// It returns false if the key is clearly incompatible (e.g., RSA or EdDSA keys). 
+func isValidECDSAKey(key any) bool { + switch key.(type) { + case + ed25519.PrivateKey, + rsa.PrivateKey, *rsa.PrivateKey: + // these are NOT ok for ECDSA algorithms + return false + } + return true +} + +// isValidEDDSAKey validates that the provided key type is appropriate for EdDSA algorithms. +// It returns false if the key is clearly incompatible (e.g., RSA or ECDSA keys). +func isValidEDDSAKey(key any) bool { + switch key.(type) { + case + ecdsa.PrivateKey, *ecdsa.PrivateKey, + rsa.PrivateKey, *rsa.PrivateKey: + // these are NOT ok for EdDSA algorithms + return false + } + return true +} + +// VerificationError represents an error that occurred during signature verification. +type VerificationError struct { + message string +} + +func (e *VerificationError) Error() string { + return e.message +} + +// NewVerificationError creates a new verification error with the given message. +func NewVerificationError(message string) error { + return &VerificationError{message: message} +} + +// IsVerificationError checks if the given error is a verification error. +func IsVerificationError(err error) bool { + _, ok := err.(*VerificationError) + return ok +} diff --git a/vendor/github.com/lestrrat-go/dsig/verify.go b/vendor/github.com/lestrrat-go/dsig/verify.go new file mode 100644 index 000000000..86085b0a3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/verify.go @@ -0,0 +1,134 @@ +package dsig + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "fmt" +) + +// Verify verifies a digital signature using the specified key and algorithm. +// +// This function loads the verifier registered in the dsig package _ONLY_. +// It does not support custom verifiers that the user might have registered. 
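Because the verifiers return `*VerificationError` for signature mismatches, callers can separate "bad signature" from setup errors such as an unknown algorithm or a bad key type. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/lestrrat-go/dsig"
)

func main() {
	key := []byte("secret-key")
	payload := []byte("hello world")

	sig, err := dsig.Sign(key, dsig.HMACWithSHA256, payload, nil)
	if err != nil {
		panic(err)
	}

	// Tamper with the payload so verification fails.
	err = dsig.Verify(key, dsig.HMACWithSHA256, []byte("HELLO WORLD"), sig)

	// IsVerificationError reports whether the failure was a signature
	// mismatch rather than a configuration problem.
	fmt.Println(dsig.IsVerificationError(err)) // true
}
```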
+func Verify(key any, alg string, payload, signature []byte) error { + info, ok := GetAlgorithmInfo(alg) + if !ok { + return fmt.Errorf(`dsig.Verify: unsupported signature algorithm %q`, alg) + } + + switch info.Family { + case HMAC: + return dispatchHMACVerify(key, info, payload, signature) + case RSA: + return dispatchRSAVerify(key, info, payload, signature) + case ECDSA: + return dispatchECDSAVerify(key, info, payload, signature) + case EdDSAFamily: + return dispatchEdDSAVerify(key, info, payload, signature) + default: + return fmt.Errorf(`dsig.Verify: unsupported signature family %q`, info.Family) + } +} + +func dispatchHMACVerify(key any, info AlgorithmInfo, payload, signature []byte) error { + meta, ok := info.Meta.(HMACFamilyMeta) + if !ok { + return fmt.Errorf(`dsig.Verify: invalid HMAC metadata`) + } + + var hmackey []byte + if err := toHMACKey(&hmackey, key); err != nil { + return fmt.Errorf(`dsig.Verify: %w`, err) + } + return VerifyHMAC(hmackey, payload, signature, meta.HashFunc) +} + +func dispatchRSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error { + meta, ok := info.Meta.(RSAFamilyMeta) + if !ok { + return fmt.Errorf(`dsig.Verify: invalid RSA metadata`) + } + + var pubkey *rsa.PublicKey + + if cs, ok := key.(crypto.Signer); ok { + cpub := cs.Public() + switch cpub := cpub.(type) { + case rsa.PublicKey: + pubkey = &cpub + case *rsa.PublicKey: + pubkey = cpub + default: + return fmt.Errorf(`dsig.Verify: failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key) + } + } else { + var ok bool + pubkey, ok = key.(*rsa.PublicKey) + if !ok { + return fmt.Errorf(`dsig.Verify: failed to retrieve *rsa.PublicKey out of %T`, key) + } + } + + return VerifyRSA(pubkey, payload, signature, meta.Hash, meta.PSS) +} + +func dispatchECDSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error { + meta, ok := info.Meta.(ECDSAFamilyMeta) + if !ok { + return fmt.Errorf(`dsig.Verify: invalid ECDSA metadata`) + } + + pubkey, cs, isCryptoSigner, err := ecdsaGetVerifierKey(key) + if err != nil { + return fmt.Errorf(`dsig.Verify: %w`, err) + } + if isCryptoSigner { + return VerifyECDSACryptoSigner(cs, payload, signature, meta.Hash) + } + return VerifyECDSA(pubkey, payload, signature, meta.Hash) +} + +func dispatchEdDSAVerify(key any, _ AlgorithmInfo, payload, signature []byte) error { + var pubkey ed25519.PublicKey + signer, ok := key.(crypto.Signer) + if ok { + v := signer.Public() + pubkey, ok = v.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`dsig.Verify: expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v) + } + } else { + var ok bool + pubkey, ok = key.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`dsig.Verify: failed to retrieve ed25519.PublicKey out of %T`, key) + } + } + + return VerifyEdDSA(pubkey, payload, signature) +} + +func ecdsaGetVerifierKey(key any) (*ecdsa.PublicKey, crypto.Signer, bool, error) { + cs, isCryptoSigner := key.(crypto.Signer) + if isCryptoSigner { + switch key.(type) { + case ecdsa.PublicKey, *ecdsa.PublicKey: + // if it's ecdsa.PublicKey, it's more efficient to + // go through the non-crypto.Signer route. Set isCryptoSigner to false + isCryptoSigner = false + } + } + + if isCryptoSigner { + return nil, cs, true, nil + } + + pubkey, ok := key.(*ecdsa.PublicKey) + if !ok { + return nil, nil, false, fmt.Errorf(`invalid key type %T. 
*ecdsa.PublicKey is required`, key) + } + + return pubkey, nil, false, nil +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml b/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml index a51b41b4a..b3af8cfe1 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml +++ b/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml @@ -29,6 +29,7 @@ linters: - nakedret - nestif - nlreturn + - noinlineerr - nonamedreturns - paralleltest - tagliatelle @@ -37,6 +38,7 @@ linters: - varnamelen - wrapcheck - wsl + - wsl_v5 settings: govet: disable: diff --git a/vendor/github.com/lestrrat-go/httprc/v3/Changes b/vendor/github.com/lestrrat-go/httprc/v3/Changes index 3c82040d4..4dc6f9f4b 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/Changes +++ b/vendor/github.com/lestrrat-go/httprc/v3/Changes @@ -1,7 +1,10 @@ Changes ======= -v3.0.0 UNRELEASED +v3.0.1 18 Aug 2025 +* Refresh() no longer requires the resource to be ready. + +v3.0.0 5 Jun 2025 [Breaking Changes] * The entire API has been re-imagined for Go versions that allow typed parameters diff --git a/vendor/github.com/lestrrat-go/httprc/v3/backend.go b/vendor/github.com/lestrrat-go/httprc/v3/backend.go index 7d4fb496a..31f9fc07d 100644 --- a/vendor/github.com/lestrrat-go/httprc/v3/backend.go +++ b/vendor/github.com/lestrrat-go/httprc/v3/backend.go @@ -71,6 +71,7 @@ func (c *ctrlBackend) refreshResource(ctx context.Context, req refreshRequest) { c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] START %q", req.u)) defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] END %q", req.u)) u := req.u + r, ok := c.items[u] if !ok { c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] %s is not registered", req.u)) @@ -78,12 +79,9 @@ func (c *ctrlBackend) refreshResource(ctx context.Context, req refreshRequest) { return } - // Make sure it's ready - if err := r.Ready(ctx); err != nil { - c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] %s did not become ready: %v", req.u, err)) - sendReply(ctx, req.reply, struct{}{}, err) - return - } + // Note: We don't wait for r.Ready() here because refresh should work + // regardless of whether the resource has been fetched before. This allows + // refresh to work with resources registered using WithWaitReady(false). 
 	r.SetNext(time.Unix(0, 0))
+
 	sendWorkerSynchronous(ctx, c.syncoutgoing, synchronousRequest{
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/worker.go b/vendor/github.com/lestrrat-go/httprc/v3/worker.go
index 57d7fb962..d11477dad 100644
--- a/vendor/github.com/lestrrat-go/httprc/v3/worker.go
+++ b/vendor/github.com/lestrrat-go/httprc/v3/worker.go
@@ -39,10 +39,12 @@ func (w worker) Run(ctx context.Context, readywg *sync.WaitGroup, donewg *sync.W
 		case sr := <-w.nextsync:
 			w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: syncing %q (synchronous)", sr.resource.URL()))
 			if err := sr.resource.Sync(ctx); err != nil {
+				w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: FAILED to sync %q (synchronous): %s", sr.resource.URL(), err))
 				sendReply(ctx, sr.reply, struct{}{}, err)
 				sr.resource.SetBusy(false)
 				return
 			}
+			w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: SUCCESS syncing %q (synchronous)", sr.resource.URL()))
 			sr.resource.SetBusy(false)
 			sendReply(ctx, sr.reply, struct{}{}, nil)
 			w.sendAdjustIntervalRequest(ctx, sr.resource)
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/Changes b/vendor/github.com/lestrrat-go/jwx/v3/Changes
index 77381cf19..29910bf35 100644
--- a/vendor/github.com/lestrrat-go/jwx/v3/Changes
+++ b/vendor/github.com/lestrrat-go/jwx/v3/Changes
@@ -4,6 +4,20 @@ Changes
 v3 has many incompatibilities with v2. To see the full list of differences between
 v2 and v3, please read the Changes-v3.md file (https://github.com/lestrrat-go/jwx/blob/develop/v3/Changes-v3.md)
 
+v3.0.11 14 Sep 2025
+ * [jwk] Add `(jwk.Cache).Shutdown()` method that delegates to the httprc controller
+   object, to shutdown the cache.
+ * [jwk] Change timing of `res.Body.Close()` call
+ * [jwe] Previously, ecdh.PrivateKey/ecdh.PublicKey were not properly handled
+   when used for encryption, which has been fixed.
+ * [jws/jwsbb] (EXPERIMENTAL/BREAKS COMPATIBILITY) Convert most functions into
+   thin wrappers around functions from github.com/lestrrat-go/dsig package.
+   As a related change, HMACHashFuncFor/RSAHashFuncFor/ECDSAHashFuncFor/RSAPSSOptions
+   have been removed or unexported.
+   Users of this module should be using jwsbb.Sign() and jwsbb.Verify() instead of
+   algorithm specific jwsbb.SignRSA()/jwsbb.VerifyRSA() and such. If you feel the
+   need to use these functions, you should use github.com/lestrrat-go/dsig directly.
+
 v3.0.10 04 Aug 2025
  * [jws/jwsbb] Add `jwsbb.ErrHeaderNotFound()` to return the same error type as
    when a non-existent header is requested via `HeaderGetXXX()` functions.
Previously, this diff --git a/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel index 004fb2a9a..167e9b5c8 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel +++ b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel @@ -9,9 +9,9 @@ bazel_dep(name = "rules_go", version = "0.55.1") bazel_dep(name = "gazelle", version = "0.44.0") bazel_dep(name = "aspect_bazel_lib", version = "2.11.0") -# Go SDK setup - using Go 1.23.6 to match the toolchain in go.mod +# Go SDK setup - using Go 1.24.4 to match the toolchain in go.mod go_sdk = use_extension("@rules_go//go:extensions.bzl", "go_sdk") -go_sdk.download(version = "1.23.6") +go_sdk.download(version = "1.24.4") # Go dependencies from go.mod go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps") @@ -23,6 +23,8 @@ use_repo( "com_github_decred_dcrd_dcrec_secp256k1_v4", "com_github_goccy_go_json", "com_github_lestrrat_go_blackmagic", + "com_github_lestrrat_go_dsig", + "com_github_lestrrat_go_dsig_secp256k1", "com_github_lestrrat_go_httprc_v3", "com_github_lestrrat_go_option_v2", "com_github_segmentio_asm", diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go index b911839c5..751fd8f05 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go @@ -5,8 +5,10 @@ import ( "crypto/ecdh" "crypto/ecdsa" "crypto/ed25519" + "crypto/elliptic" "crypto/rsa" "fmt" + "math/big" "github.com/lestrrat-go/blackmagic" "github.com/lestrrat-go/jwx/v3/jwk" @@ -263,3 +265,90 @@ func ECDHPublicKey(dst, src any) error { return blackmagic.AssignIfCompatible(dst, pubECDH) } + +// ecdhCurveToElliptic maps ECDH curves to elliptic curves +func ecdhCurveToElliptic(ecdhCurve ecdh.Curve) (elliptic.Curve, error) { + switch ecdhCurve { + case ecdh.P256(): + return elliptic.P256(), nil + case ecdh.P384(): + return elliptic.P384(), nil + case ecdh.P521(): + return elliptic.P521(), nil + default: + return nil, fmt.Errorf(`keyconv: unsupported ECDH curve: %v`, ecdhCurve) + } +} + +// ecdhPublicKeyToECDSA converts an ECDH public key to an ECDSA public key +func ecdhPublicKeyToECDSA(ecdhPubKey *ecdh.PublicKey) (*ecdsa.PublicKey, error) { + curve, err := ecdhCurveToElliptic(ecdhPubKey.Curve()) + if err != nil { + return nil, err + } + + pubBytes := ecdhPubKey.Bytes() + + // Parse the uncompressed point format (0x04 prefix + X + Y coordinates) + if len(pubBytes) == 0 || pubBytes[0] != 0x04 { + return nil, fmt.Errorf(`keyconv: invalid ECDH public key format`) + } + + keyLen := (len(pubBytes) - 1) / 2 + if len(pubBytes) != 1+2*keyLen { + return nil, fmt.Errorf(`keyconv: invalid ECDH public key length`) + } + + x := new(big.Int).SetBytes(pubBytes[1 : 1+keyLen]) + y := new(big.Int).SetBytes(pubBytes[1+keyLen:]) + + return &ecdsa.PublicKey{ + Curve: curve, + X: x, + Y: y, + }, nil +} + +func ECDHToECDSA(dst, src any) error { + // convert ecdh.PublicKey to ecdsa.PublicKey, ecdh.PrivateKey to ecdsa.PrivateKey + + // First, handle value types by converting to pointers + switch s := src.(type) { + case ecdh.PrivateKey: + src = &s + case ecdh.PublicKey: + src = &s + } + + var privBytes []byte + var pubkey *ecdh.PublicKey + // Now handle the actual conversion with pointer types + switch src 
:= src.(type) { + case *ecdh.PrivateKey: + pubkey = src.PublicKey() + privBytes = src.Bytes() + case *ecdh.PublicKey: + pubkey = src + default: + return fmt.Errorf(`keyconv: expected ecdh.PrivateKey, *ecdh.PrivateKey, ecdh.PublicKey, or *ecdh.PublicKey, got %T`, src) + } + + // convert the public key + ecdsaPubKey, err := ecdhPublicKeyToECDSA(pubkey) + if err != nil { + return fmt.Errorf(`keyconv.ECDHToECDSA: failed to convert ECDH public key to ECDSA public key: %w`, err) + } + + // return if we were being asked to convert *ecdh.PublicKey + if privBytes == nil { + return blackmagic.AssignIfCompatible(dst, ecdsaPubKey) + } + + // Then create the private key with the public key embedded + ecdsaPrivKey := &ecdsa.PrivateKey{ + D: new(big.Int).SetBytes(privBytes), + PublicKey: *ecdsaPubKey, + } + + return blackmagic.AssignIfCompatible(dst, ecdsaPrivKey) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go index a71e71689..e75f342a3 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go @@ -96,23 +96,48 @@ func (e *encrypter) EncryptKey(cek []byte) (keygen.ByteSource, error) { keyToUse = e.pubkey } - // Handle ecdsa.PublicKey by value - convert to pointer - if pk, ok := keyToUse.(ecdsa.PublicKey); ok { - keyToUse = &pk + switch key := keyToUse.(type) { + case *ecdsa.PublicKey: + // no op + case ecdsa.PublicKey: + keyToUse = &key + case *ecdsa.PrivateKey: + keyToUse = &key.PublicKey + case ecdsa.PrivateKey: + keyToUse = &key.PublicKey + case *ecdh.PublicKey: + // no op + case ecdh.PublicKey: + keyToUse = &key + case ecdh.PrivateKey: + keyToUse = key.PublicKey() + case *ecdh.PrivateKey: + keyToUse = key.PublicKey() } // Determine key type and call appropriate function + switch key := keyToUse.(type) { + case *ecdh.PublicKey: + if key.Curve() == ecdh.X25519() { + if !keywrap { + return jwebb.KeyEncryptECDHESX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) + } + return jwebb.KeyEncryptECDHESKeyWrapX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) + } + + var ecdsaKey *ecdsa.PublicKey + if err := keyconv.ECDHToECDSA(&ecdsaKey, key); err != nil { + return nil, fmt.Errorf(`encrypt: failed to convert ECDH public key to ECDSA: %w`, err) + } + keyToUse = ecdsaKey + } + switch key := keyToUse.(type) { case *ecdsa.PublicKey: if !keywrap { return jwebb.KeyEncryptECDHESECDSA(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) } return jwebb.KeyEncryptECDHESKeyWrapECDSA(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) - case *ecdh.PublicKey: - if !keywrap { - return jwebb.KeyEncryptECDHESX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) - } - return jwebb.KeyEncryptECDHESKeyWrapX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) default: return nil, fmt.Errorf(`encrypt: unsupported key type for ECDH-ES: %T`, keyToUse) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go index d827668e1..b83b56c79 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go @@ -223,6 +223,10 @@ func (c *Cache) Unregister(ctx context.Context, u string) error { return c.ctrl.Remove(ctx, u) } +func 
(c *Cache) Shutdown(ctx context.Context) error { + return c.ctrl.ShutdownContext(ctx) +} + // CachedSet is a thin shim over jwk.Cache that allows the user to cloak // jwk.Cache as if it's a `jwk.Set`. Behind the scenes, the `jwk.Set` is // retrieved from the `jwk.Cache` for every operation. diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go index 2b7917fd2..910a2101d 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go @@ -102,13 +102,13 @@ func Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) { if err != nil { return nil, fmt.Errorf(`jwk.Fetch: request failed: %w`, err) } + defer res.Body.Close() if res.StatusCode != http.StatusOK { return nil, fmt.Errorf(`jwk.Fetch: request returned status %d, expected 200`, res.StatusCode) } buf, err := io.ReadAll(res.Body) - defer res.Body.Close() if err != nil { return nil, fmt.Errorf(`jwk.Fetch: failed to read response body for %q: %w`, u, err) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go index ff5a1d8f7..3b68c4614 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go @@ -8,5 +8,6 @@ import ( ) func init() { + // Register ES256K under the EC algorithm family addAlgorithmForKeyType(jwa.EC(), jwa.ES256K()) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go index b41b2758b..1fa77438b 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go @@ -19,6 +19,10 @@ // To verify, use `jws.Verify`. It will parse the `encodedjws` buffer // and verify the result using `algorithm` and `key`. Upon successful // verification, the original payload is returned, so you can work on it. +// +// As a side note, consider using github.com/lestrrat-go/htmsig if you're +// looking for HTTP Message Signatures (RFC9421) -- it uses the same +// underlying signing/verification mechanisms as this module.
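The `(jwk.Cache).Shutdown(ctx)` method added earlier in this hunk gives callers an explicit way to tear down the cache's background httprc controller. A minimal lifecycle sketch follows; `Shutdown(ctx)` is taken from this diff, while the `jwk.NewCache(ctx, httprc.NewClient())` constructor, the `Register`/`Lookup` signatures, and the JWKS URL are assumptions based on the jwx v3 API and may differ slightly by version.

package main

import (
	"context"
	"log"

	"github.com/lestrrat-go/httprc/v3"
	"github.com/lestrrat-go/jwx/v3/jwk"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	const jwksURL = "https://example.com/.well-known/jwks.json" // hypothetical endpoint

	// Assumed v3 constructor: the cache delegates fetching to an httprc client.
	cache, err := jwk.NewCache(ctx, httprc.NewClient())
	if err != nil {
		log.Fatal(err)
	}
	// Assumed signatures: register the URL, then look up the cached set.
	if err := cache.Register(ctx, jwksURL); err != nil {
		log.Fatal(err)
	}
	set, err := cache.Lookup(ctx, jwksURL)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d keys", set.Len())

	// New in v3.0.11 (from this diff): shut down the httprc controller explicitly.
	if err := cache.Shutdown(ctx); err != nil {
		log.Fatal(err)
	}
}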
package jws import ( diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel index f8fcdac4a..0799e8111 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//internal/pool", "//internal/tokens", "//jws/internal/keytype", + "@com_github_lestrrat_go_dsig//:dsig", ], ) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go index bd7ef4adc..1eb492ee7 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go @@ -3,59 +3,27 @@ package jwsbb import ( "crypto" "crypto/ecdsa" - "crypto/rand" "encoding/asn1" "fmt" "io" "math/big" + "github.com/lestrrat-go/dsig" "github.com/lestrrat-go/jwx/v3/internal/ecutil" - "github.com/lestrrat-go/jwx/v3/internal/keyconv" - "github.com/lestrrat-go/jwx/v3/jws/internal/keytype" ) -var ecdsaHashFuncs = map[string]crypto.Hash{ - "ES256": crypto.SHA256, - "ES256K": crypto.SHA256, - "ES384": crypto.SHA384, - "ES512": crypto.SHA512, -} - -func isSuppotedECDSAAlgorithm(alg string) bool { - _, ok := ecdsaHashFuncs[alg] - return ok -} - -func ECDSAHashFuncFor(alg string) (crypto.Hash, error) { - if h, ok := ecdsaHashFuncs[alg]; ok { - return h, nil - } - return 0, fmt.Errorf(`unsupported ECDSA algorithm %s`, alg) -} - -func ecdsaGetSignerKey(key any) (*ecdsa.PrivateKey, crypto.Signer, bool, error) { - cs, isCryptoSigner := key.(crypto.Signer) - if isCryptoSigner { - if !keytype.IsValidECDSAKey(key) { - return nil, nil, false, fmt.Errorf(`cannot use key of type %T`, key) - } - switch key.(type) { - case ecdsa.PrivateKey, *ecdsa.PrivateKey: - // if it's ecdsa.PrivateKey, it's more efficient to - // go through the non-crypto.Signer route. Set isCryptoSigner to false - isCryptoSigner = false - } - } - - if isCryptoSigner { - return nil, cs, true, nil - } - - var privkey *ecdsa.PrivateKey - if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil { - return nil, nil, false, fmt.Errorf(`invalid key type %T. ecdsa.PrivateKey is required: %w`, key, err) +// ecdsaHashToDsigAlgorithm maps ECDSA hash functions to dsig algorithm constants +func ecdsaHashToDsigAlgorithm(h crypto.Hash) (string, error) { + switch h { + case crypto.SHA256: + return dsig.ECDSAWithP256AndSHA256, nil + case crypto.SHA384: + return dsig.ECDSAWithP384AndSHA384, nil + case crypto.SHA512: + return dsig.ECDSAWithP521AndSHA512, nil + default: + return "", fmt.Errorf("unsupported ECDSA hash function: %v", h) } - return privkey, nil, false, nil } // UnpackASN1ECDSASignature unpacks an ASN.1 encoded ECDSA signature into r and s values. @@ -120,24 +88,16 @@ func PackECDSASignature(r *big.Int, sbig *big.Int, curveBits int) ([]byte, error // The raw parameter should be the pre-computed signing input (typically header.payload). // // rr is an io.Reader that provides randomness for signing. if rr is nil, it defaults to rand.Reader. +// +// This function is now a thin wrapper around dsig.SignECDSA. For new projects, you should +// consider using dsig instead of this function. 
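Because these jwsbb helpers are now thin wrappers, calling dsig directly is straightforward. The sketch below uses only calls visible in this diff (`dsig.Sign(key, alg, payload, rr)`, `dsig.Verify(key, alg, payload, sig)`, and the `dsig.ECDSAWithP256AndSHA256` constant); the generated key and payload are illustrative.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/lestrrat-go/dsig"
)

func main() {
	// Illustrative P-256 key; the dsig calls and algorithm constant
	// mirror the wrapper implementations in this diff.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	payload := []byte("header.payload") // pre-computed JWS signing input

	// nil randomness reader defaults to rand.Reader, as in SignECDSA above.
	sig, err := dsig.Sign(key, dsig.ECDSAWithP256AndSHA256, payload, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := dsig.Verify(&key.PublicKey, dsig.ECDSAWithP256AndSHA256, payload, sig); err != nil {
		log.Fatal(err)
	}
	log.Println("signature verified")
}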
func SignECDSA(key *ecdsa.PrivateKey, payload []byte, h crypto.Hash, rr io.Reader) ([]byte, error) { - hh := h.New() - if _, err := hh.Write(payload); err != nil { - return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err) - } - digest := hh.Sum(nil) - - if rr == nil { - rr = rand.Reader - } - - // Sign and get r, s values - r, s, err := ecdsa.Sign(rr, key, digest) + dsigAlg, err := ecdsaHashToDsigAlgorithm(h) if err != nil { - return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err) + return nil, fmt.Errorf("jwsbb.SignECDSA: %w", err) } - return PackECDSASignature(r, s, key.Curve.Params().BitSize) + return dsig.Sign(key, dsigAlg, payload, rr) } // SignECDSACryptoSigner generates an ECDSA signature using a crypto.Signer interface. @@ -183,13 +143,16 @@ func ecdsaVerify(key *ecdsa.PublicKey, buf []byte, h crypto.Hash, r, s *big.Int) // VerifyECDSA verifies an ECDSA signature for the given payload. // This function verifies the signature using the specified public key and hash algorithm. // The payload parameter should be the pre-computed signing input (typically header.payload). +// +// This function is now a thin wrapper around dsig.VerifyECDSA. For new projects, you should +// consider using dsig instead of this function. func VerifyECDSA(key *ecdsa.PublicKey, payload, signature []byte, h crypto.Hash) error { - var r, s big.Int - if err := UnpackECDSASignature(signature, key, &r, &s); err != nil { - return fmt.Errorf("jwsbb.ECDSAVerifier: failed to unpack ECDSA signature: %w", err) + dsigAlg, err := ecdsaHashToDsigAlgorithm(h) + if err != nil { + return fmt.Errorf("jwsbb.VerifyECDSA: %w", err) } - return ecdsaVerify(key, payload, h, &r, &s) + return dsig.Verify(key, dsigAlg, payload, signature) } // VerifyECDSACryptoSigner verifies an ECDSA signature for crypto.Signer implementations. diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go index a5e41d2ec..960cf97dd 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go @@ -1,52 +1,30 @@ package jwsbb import ( - "crypto" "crypto/ed25519" - "fmt" - "github.com/lestrrat-go/jwx/v3/internal/keyconv" - "github.com/lestrrat-go/jwx/v3/jws/internal/keytype" + "github.com/lestrrat-go/dsig" ) -func isSupportedEdDSAAlgorithm(alg string) bool { - return alg == "EdDSA" -} - -func eddsaGetSigner(key any) (crypto.Signer, error) { - // The ed25519.PrivateKey object implements crypto.Signer, so we should - // simply accept a crypto.Signer here. - signer, ok := key.(crypto.Signer) - if ok { - if !keytype.IsValidEDDSAKey(key) { - return nil, fmt.Errorf(`cannot use key of type %T to generate EdDSA based signatures`, key) - } - return signer, nil - } - - // This fallback exists for cases when jwk.Key was passed, or - // users gave us a pointer instead of non-pointer, etc. - var privkey ed25519.PrivateKey - if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil { - return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T: %w`, key, err) - } - return privkey, nil -} - // SignEdDSA generates an EdDSA (Ed25519) signature for the given payload. // The raw parameter should be the pre-computed signing input (typically header.payload). // EdDSA is deterministic and doesn't require additional hashing of the input. 
+// +// This function is now a thin wrapper around dsig.SignEdDSA. For new projects, you should +// consider using dsig instead of this function. func SignEdDSA(key ed25519.PrivateKey, payload []byte) ([]byte, error) { - return ed25519.Sign(key, payload), nil + // Use dsig.Sign with EdDSA algorithm constant + return dsig.Sign(key, dsig.EdDSA, payload, nil) } // VerifyEdDSA verifies an EdDSA (Ed25519) signature for the given payload. // This function verifies the signature using Ed25519 verification algorithm. // The payload parameter should be the pre-computed signing input (typically header.payload). // EdDSA is deterministic and provides strong security guarantees without requiring hash function selection. +// +// This function is now a thin wrapper around dsig.VerifyEdDSA. For new projects, you should +// consider using dsig instead of this function. func VerifyEdDSA(key ed25519.PublicKey, payload, signature []byte) error { - if !ed25519.Verify(key, payload, signature) { - return fmt.Errorf("invalid EdDSA signature") - } - return nil + // Use dsig.Verify with EdDSA algorithm constant + return dsig.Verify(key, dsig.EdDSA, payload, signature) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go new file mode 100644 index 000000000..a8761ee0f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go @@ -0,0 +1,14 @@ +//go:build jwx_es256k + +package jwsbb + +import ( + dsigsecp256k1 "github.com/lestrrat-go/dsig-secp256k1" +) + +const es256k = "ES256K" + +func init() { + // Add ES256K mapping when this build tag is enabled + jwsToDsigAlgorithm[es256k] = dsigsecp256k1.ECDSAWithSecp256k1AndSHA256 +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go index 782915ff2..8e70eb667 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go @@ -1,67 +1,52 @@ package jwsbb import ( - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" "fmt" "hash" - "github.com/lestrrat-go/jwx/v3/internal/keyconv" + "github.com/lestrrat-go/dsig" ) -var hmacHashFuncs = map[string]func() hash.Hash{ - "HS256": sha256.New, - "HS384": sha512.New384, - "HS512": sha512.New, -} - -func isSupportedHMACAlgorithm(alg string) bool { - _, ok := hmacHashFuncs[alg] - return ok -} - -// HMACHashFuncFor returns the appropriate hash function for the given HMAC algorithm. -// Supported algorithms: HS256 (SHA-256), HS384 (SHA-384), HS512 (SHA-512). -// Returns the hash function constructor and an error if the algorithm is unsupported. 
-func HMACHashFuncFor(alg string) (func() hash.Hash, error) { - if h, ok := hmacHashFuncs[alg]; ok { - return h, nil +// hmacHashToDsigAlgorithm maps HMAC hash function sizes to dsig algorithm constants +func hmacHashToDsigAlgorithm(hfunc func() hash.Hash) (string, error) { + h := hfunc() + switch h.Size() { + case 32: // SHA256 + return dsig.HMACWithSHA256, nil + case 48: // SHA384 + return dsig.HMACWithSHA384, nil + case 64: // SHA512 + return dsig.HMACWithSHA512, nil + default: + return "", fmt.Errorf("unsupported HMAC hash function: size=%d", h.Size()) } - return nil, fmt.Errorf("unsupported HMAC algorithm %s", alg) -} - -func toHMACKey(dst *[]byte, key any) error { - if err := keyconv.ByteSliceKey(dst, key); err != nil { - return fmt.Errorf(`jws.toHMACKey: invalid key type %T. []byte is required: %w`, key, err) - } - - if len(*dst) == 0 { - return fmt.Errorf(`jws.toHMACKey: missing key while signing payload`) - } - return nil } // SignHMAC generates an HMAC signature for the given payload using the specified hash function and key. // The raw parameter should be the pre-computed signing input (typically header.payload). +// +// This function is now a thin wrapper around dsig.SignHMAC. For new projects, you should +// consider using dsig instead of this function. func SignHMAC(key, payload []byte, hfunc func() hash.Hash) ([]byte, error) { - h := hmac.New(hfunc, key) - if _, err := h.Write(payload); err != nil { - return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err) + dsigAlg, err := hmacHashToDsigAlgorithm(hfunc) + if err != nil { + return nil, fmt.Errorf("jwsbb.SignHMAC: %w", err) } - return h.Sum(nil), nil + + return dsig.Sign(key, dsigAlg, payload, nil) } // VerifyHMAC verifies an HMAC signature for the given payload. // This function verifies the signature using the specified key and hash function. // The payload parameter should be the pre-computed signing input (typically header.payload). +// +// This function is now a thin wrapper around dsig.VerifyHMAC. For new projects, you should +// consider using dsig instead of this function. func VerifyHMAC(key, payload, signature []byte, hfunc func() hash.Hash) error { - expected, err := SignHMAC(key, payload, hfunc) + dsigAlg, err := hmacHashToDsigAlgorithm(hfunc) if err != nil { - return fmt.Errorf("failed to sign payload for verification: %w", err) - } - if !hmac.Equal(signature, expected) { - return fmt.Errorf("invalid HMAC signature") + return fmt.Errorf("jwsbb.VerifyHMAC: %w", err) } - return nil + + return dsig.Verify(key, dsigAlg, payload, signature) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go index 51d01df63..6a67ee8f8 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go @@ -12,8 +12,40 @@ // 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid. // 2. All exported functions are strongly typed (i.e. they do not take `any` types unless they absolutely have to). // 3. Does not rely on other public jwx packages (they are standalone, except for internal packages). +// +// This implementation uses github.com/lestrrat-go/dsig as the underlying signature provider. 
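The package-level `jwsbb.Sign` and `jwsbb.Verify` entry points that this package doc describes dispatch on the JWS algorithm name and now route everything through dsig; their signatures appear in sign.go and verify.go later in this diff. A minimal HMAC round trip, with an illustrative key and payload:

package main

import (
	"log"

	"github.com/lestrrat-go/jwx/v3/jws/jwsbb"
)

func main() {
	// HS256 requires a []byte key; key and payload here are illustrative.
	key := []byte("illustrative-shared-secret")
	payload := []byte("eyJhbGciOiJIUzI1NiJ9.eyJmb28iOiJiYXIifQ") // signing input: header.payload

	// Signatures taken from sign.go/verify.go in this diff; rr may be nil.
	sig, err := jwsbb.Sign(key, "HS256", payload, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := jwsbb.Verify(key, "HS256", payload, sig); err != nil {
		log.Fatal(err)
	}
	log.Println("HS256 signature verified")
}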
package jwsbb +import ( + "github.com/lestrrat-go/dsig" +) + +// JWS algorithm name constants +const ( + // HMAC algorithms + hs256 = "HS256" + hs384 = "HS384" + hs512 = "HS512" + + // RSA PKCS#1 v1.5 algorithms + rs256 = "RS256" + rs384 = "RS384" + rs512 = "RS512" + + // RSA PSS algorithms + ps256 = "PS256" + ps384 = "PS384" + ps512 = "PS512" + + // ECDSA algorithms + es256 = "ES256" + es384 = "ES384" + es512 = "ES512" + + // EdDSA algorithm + edDSA = "EdDSA" +) + // Signer is a generic interface that defines the method for signing payloads. // The type parameter K represents the key type (e.g., []byte for HMAC keys, // *rsa.PrivateKey for RSA keys, *ecdsa.PrivateKey for ECDSA keys). @@ -27,3 +59,36 @@ type Signer[K any] interface { type Verifier[K any] interface { Verify(key K, buf []byte, signature []byte) error } + +// JWS to dsig algorithm mapping +var jwsToDsigAlgorithm = map[string]string{ + // HMAC algorithms + hs256: dsig.HMACWithSHA256, + hs384: dsig.HMACWithSHA384, + hs512: dsig.HMACWithSHA512, + + // RSA PKCS#1 v1.5 algorithms + rs256: dsig.RSAPKCS1v15WithSHA256, + rs384: dsig.RSAPKCS1v15WithSHA384, + rs512: dsig.RSAPKCS1v15WithSHA512, + + // RSA PSS algorithms + ps256: dsig.RSAPSSWithSHA256, + ps384: dsig.RSAPSSWithSHA384, + ps512: dsig.RSAPSSWithSHA512, + + // ECDSA algorithms + es256: dsig.ECDSAWithP256AndSHA256, + es384: dsig.ECDSAWithP384AndSHA384, + es512: dsig.ECDSAWithP521AndSHA512, + // Note: ES256K requires external dependency and is handled separately + + // EdDSA algorithm + edDSA: dsig.EdDSA, +} + +// getDsigAlgorithm returns the dsig algorithm name for a JWS algorithm +func getDsigAlgorithm(jwsAlg string) (string, bool) { + dsigAlg, ok := jwsToDsigAlgorithm[jwsAlg] + return dsigAlg, ok +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go index f3083dcea..36997cef7 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go @@ -6,53 +6,33 @@ import ( "fmt" "io" - "github.com/lestrrat-go/jwx/v3/jws/internal/keytype" + "github.com/lestrrat-go/dsig" ) -func rsaGetSignerCryptoSignerKey(key any) (crypto.Signer, bool, error) { - cs, isCryptoSigner := key.(crypto.Signer) - if isCryptoSigner { - if !keytype.IsValidRSAKey(key) { - return nil, false, fmt.Errorf(`cannot use key of type %T`, key) +// rsaHashToDsigAlgorithm maps RSA hash functions to dsig algorithm constants +func rsaHashToDsigAlgorithm(h crypto.Hash, pss bool) (string, error) { + if pss { + switch h { + case crypto.SHA256: + return dsig.RSAPSSWithSHA256, nil + case crypto.SHA384: + return dsig.RSAPSSWithSHA384, nil + case crypto.SHA512: + return dsig.RSAPSSWithSHA512, nil + default: + return "", fmt.Errorf("unsupported hash algorithm for RSA-PSS: %v", h) + } + } else { + switch h { + case crypto.SHA256: + return dsig.RSAPKCS1v15WithSHA256, nil + case crypto.SHA384: + return dsig.RSAPKCS1v15WithSHA384, nil + case crypto.SHA512: + return dsig.RSAPKCS1v15WithSHA512, nil + default: + return "", fmt.Errorf("unsupported hash algorithm for RSA PKCS#1 v1.5: %v", h) } - return cs, true, nil - } - return nil, false, nil -} - -var rsaHashFuncs = map[string]struct { - Hash crypto.Hash - PSS bool // whether to use PSS padding -}{ - "RS256": {Hash: crypto.SHA256, PSS: false}, - "RS384": {Hash: crypto.SHA384, PSS: false}, - "RS512": {Hash: crypto.SHA512, PSS: false}, - 
"PS256": {Hash: crypto.SHA256, PSS: true}, - "PS384": {Hash: crypto.SHA384, PSS: true}, - "PS512": {Hash: crypto.SHA512, PSS: true}, -} - -func isSuppotedRSAAlgorithm(alg string) bool { - _, ok := rsaHashFuncs[alg] - return ok -} - -// RSAHashFuncFor returns the appropriate hash function and PSS flag for the given RSA algorithm. -// Supported algorithms: RS256, RS384, RS512 (PKCS#1 v1.5) and PS256, PS384, PS512 (PSS). -// Returns the hash function, PSS flag, and an error if the algorithm is unsupported. -func RSAHashFuncFor(alg string) (crypto.Hash, bool, error) { - if h, ok := rsaHashFuncs[alg]; ok { - return h.Hash, h.PSS, nil - } - return 0, false, fmt.Errorf("unsupported RSA algorithm %s", alg) -} - -// RSAPSSOptions returns the PSS options for RSA-PSS signatures with the specified hash. -// The salt length is set to equal the hash length as per RFC 7518. -func RSAPSSOptions(h crypto.Hash) rsa.PSSOptions { - return rsa.PSSOptions{ - Hash: h, - SaltLength: rsa.PSSSaltLengthEqualsHash, } } @@ -62,25 +42,30 @@ func RSAPSSOptions(h crypto.Hash) rsa.PSSOptions { // // The rr parameter is an optional io.Reader that can be used to provide randomness for signing. // If rr is nil, it defaults to rand.Reader. +// +// This function is now a thin wrapper around dsig.SignRSA. For new projects, you should +// consider using dsig instead of this function. func SignRSA(key *rsa.PrivateKey, payload []byte, h crypto.Hash, pss bool, rr io.Reader) ([]byte, error) { - var opts crypto.SignerOpts = h - if pss { - rsaopts := RSAPSSOptions(h) - opts = &rsaopts + dsigAlg, err := rsaHashToDsigAlgorithm(h, pss) + if err != nil { + return nil, fmt.Errorf("jwsbb.SignRSA: %w", err) } - return cryptosign(key, payload, h, opts, rr) + + return dsig.Sign(key, dsigAlg, payload, rr) } // VerifyRSA verifies an RSA signature for the given payload and header. // This function constructs the signing input by encoding the header and payload according to JWS specification, // then verifies the signature using the specified public key and hash algorithm. // If pss is true, RSA-PSS verification is used; otherwise, PKCS#1 v1.5 verification is used. +// +// This function is now a thin wrapper around dsig.VerifyRSA. For new projects, you should +// consider using dsig instead of this function. func VerifyRSA(key *rsa.PublicKey, payload, signature []byte, h crypto.Hash, pss bool) error { - hasher := h.New() - hasher.Write(payload) - digest := hasher.Sum(nil) - if pss { - return rsa.VerifyPSS(key, h, digest, signature, &rsa.PSSOptions{Hash: h, SaltLength: rsa.PSSSaltLengthEqualsHash}) + dsigAlg, err := rsaHashToDsigAlgorithm(h, pss) + if err != nil { + return fmt.Errorf("jwsbb.VerifyRSA: %w", err) } - return rsa.VerifyPKCS1v15(key, h, digest, signature) + + return dsig.Verify(key, dsigAlg, payload, signature) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go index 0599ee5df..6f36ab055 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go @@ -2,94 +2,109 @@ package jwsbb import ( "crypto" + "crypto/ecdsa" + "crypto/ed25519" "crypto/rsa" "fmt" "io" + "github.com/lestrrat-go/dsig" "github.com/lestrrat-go/jwx/v3/internal/keyconv" ) // Sign generates a JWS signature using the specified key and algorithm. // -// This function loads the signer registered in the hwsbb package _ONLY_. 
+// This function loads the signer registered in the jwsbb package _ONLY_. // It does not support custom signers that the user might have registered. // // rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. // Not all algorithms require this parameter, but it is included for consistency. // 99% of the time, you can pass nil for rr, and it will work fine. func Sign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) { - switch { - case isSupportedHMACAlgorithm(alg): - return dispatchHMACSign(key, alg, payload) - case isSuppotedRSAAlgorithm(alg): - return dispatchRSASign(key, alg, payload, rr) - case isSuppotedECDSAAlgorithm(alg): - return dispatchECDSASign(key, alg, payload, rr) - case isSupportedEdDSAAlgorithm(alg): - return dispatchEdDSASign(key, alg, payload, rr) + dsigAlg, ok := getDsigAlgorithm(alg) + if !ok { + return nil, fmt.Errorf(`jwsbb.Sign: unsupported signature algorithm %q`, alg) } - return nil, fmt.Errorf(`jwsbb.Sign: unsupported signature algorithm %q`, alg) -} + // Get dsig algorithm info to determine key conversion strategy + dsigInfo, ok := dsig.GetAlgorithmInfo(dsigAlg) + if !ok { + return nil, fmt.Errorf(`jwsbb.Sign: dsig algorithm %q not registered`, dsigAlg) + } -func dispatchHMACSign(key any, alg string, payload []byte) ([]byte, error) { - h, err := HMACHashFuncFor(alg) - if err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: failed to get hash function for %s: %w`, alg, err) + switch dsigInfo.Family { + case dsig.HMAC: + return dispatchHMACSign(key, dsigAlg, payload) + case dsig.RSA: + return dispatchRSASign(key, dsigAlg, payload, rr) + case dsig.ECDSA: + return dispatchECDSASign(key, dsigAlg, payload, rr) + case dsig.EdDSAFamily: + return dispatchEdDSASign(key, dsigAlg, payload, rr) + default: + return nil, fmt.Errorf(`jwsbb.Sign: unsupported dsig algorithm family %q`, dsigInfo.Family) } +} +func dispatchHMACSign(key any, dsigAlg string, payload []byte) ([]byte, error) { var hmackey []byte - if err := toHMACKey(&hmackey, key); err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: %w`, err) + if err := keyconv.ByteSliceKey(&hmackey, key); err != nil { + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. []byte is required: %w`, key, err) } - return SignHMAC(hmackey, payload, h) + + return dsig.Sign(hmackey, dsigAlg, payload, nil) } -func dispatchRSASign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) { - h, pss, err := RSAHashFuncFor(alg) - if err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: failed to get hash function for %s: %w`, alg, err) - } - cs, isCryptoSigner, err := rsaGetSignerCryptoSignerKey(key) - if err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: %w`, err) - } - if isCryptoSigner { - var options crypto.SignerOpts = h - if pss { - rsaopts := RSAPSSOptions(h) - options = &rsaopts +func dispatchRSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an RSA key + if _, ok := signer.Public().(*rsa.PublicKey); ok { + return dsig.Sign(signer, dsigAlg, payload, rr) } - return SignCryptoSigner(cs, payload, h, options, rr) } + // Fall back to concrete key types var privkey *rsa.PrivateKey if err := keyconv.RSAPrivateKey(&privkey, key); err != nil { - return nil, fmt.Errorf(`jws.RSASigner: invalid key type %T. rsa.PrivateKey is required: %w`, key, err) + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. 
*rsa.PrivateKey is required: %w`, key, err) } - return SignRSA(privkey, payload, h, pss, rr) + + return dsig.Sign(privkey, dsigAlg, payload, rr) } -func dispatchEdDSASign(key any, _ string, payload []byte, rr io.Reader) ([]byte, error) { - signer, err := eddsaGetSigner(key) - if err != nil { - return nil, fmt.Errorf(`jws.EdDSASigner: %w`, err) +func dispatchECDSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an ECDSA key + if _, ok := signer.Public().(*ecdsa.PublicKey); ok { + return dsig.Sign(signer, dsigAlg, payload, rr) + } + } + + // Fall back to concrete key types + var privkey *ecdsa.PrivateKey + if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. *ecdsa.PrivateKey is required: %w`, key, err) } - return SignCryptoSigner(signer, payload, crypto.Hash(0), crypto.Hash(0), rr) + return dsig.Sign(privkey, dsigAlg, payload, rr) } -func dispatchECDSASign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) { - h, err := ECDSAHashFuncFor(alg) - if err != nil { - return nil, fmt.Errorf(`jwsbb.Sign: failed to get hash function for %s: %w`, alg, err) - } - privkey, cs, isCryptoSigner, err := ecdsaGetSignerKey(key) - if err != nil { - return nil, fmt.Errorf(`jws.ECDSASigner: %w`, err) +func dispatchEdDSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an EdDSA key + if _, ok := signer.Public().(ed25519.PublicKey); ok { + return dsig.Sign(signer, dsigAlg, payload, rr) + } } - if isCryptoSigner { - return SignECDSACryptoSigner(cs, payload, h, rr) + + // Fall back to concrete key types + var privkey ed25519.PrivateKey + if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. ed25519.PrivateKey is required: %w`, key, err) } - return SignECDSA(privkey, payload, h, rr) + + return dsig.Sign(privkey, dsigAlg, payload, rr) } diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go index 85121199f..bac3ff487 100644 --- a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go @@ -7,6 +7,7 @@ import ( "crypto/rsa" "fmt" + "github.com/lestrrat-go/dsig" "github.com/lestrrat-go/jwx/v3/internal/keyconv" ) @@ -15,113 +16,90 @@ import ( // This function loads the verifier registered in the jwsbb package _ONLY_. // It does not support custom verifiers that the user might have registered. 
func Verify(key any, alg string, payload, signature []byte) error { - switch { - case isSupportedHMACAlgorithm(alg): - return dispatchHMACVerify(key, alg, payload, signature) - case isSuppotedRSAAlgorithm(alg): - return dispatchRSAVerify(key, alg, payload, signature) - case isSuppotedECDSAAlgorithm(alg): - return dispatchECDSAVerify(key, alg, payload, signature) - case isSupportedEdDSAAlgorithm(alg): - return dispatchEdDSAVerify(key, alg, payload, signature) + dsigAlg, ok := getDsigAlgorithm(alg) + if !ok { + return fmt.Errorf(`jwsbb.Verify: unsupported signature algorithm %q`, alg) } - return fmt.Errorf(`jwsbb.Verify: unsupported signature algorithm %q`, alg) -} - -func dispatchHMACVerify(key any, alg string, payload, signature []byte) error { - h, err := HMACHashFuncFor(alg) - if err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to get hash function for %s: %w`, alg, err) + // Get dsig algorithm info to determine key conversion strategy + dsigInfo, ok := dsig.GetAlgorithmInfo(dsigAlg) + if !ok { + return fmt.Errorf(`jwsbb.Verify: dsig algorithm %q not registered`, dsigAlg) } - var hmackey []byte - if err := toHMACKey(&hmackey, key); err != nil { - return fmt.Errorf(`jwsbb.Verify: %w`, err) + switch dsigInfo.Family { + case dsig.HMAC: + return dispatchHMACVerify(key, dsigAlg, payload, signature) + case dsig.RSA: + return dispatchRSAVerify(key, dsigAlg, payload, signature) + case dsig.ECDSA: + return dispatchECDSAVerify(key, dsigAlg, payload, signature) + case dsig.EdDSAFamily: + return dispatchEdDSAVerify(key, dsigAlg, payload, signature) + default: + return fmt.Errorf(`jwsbb.Verify: unsupported dsig algorithm family %q`, dsigInfo.Family) } - return VerifyHMAC(hmackey, payload, signature, h) } -func dispatchRSAVerify(key any, alg string, payload, signature []byte) error { - h, pss, err := RSAHashFuncFor(alg) - if err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to get hash function for %s: %w`, alg, err) +func dispatchHMACVerify(key any, dsigAlg string, payload, signature []byte) error { + var hmackey []byte + if err := keyconv.ByteSliceKey(&hmackey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. []byte is required: %w`, key, err) } - var pubkey *rsa.PublicKey + return dsig.Verify(hmackey, dsigAlg, payload, signature) +} - if cs, ok := key.(crypto.Signer); ok { - cpub := cs.Public() - switch cpub := cpub.(type) { - case rsa.PublicKey: - pubkey = &cpub - case *rsa.PublicKey: - pubkey = cpub - default: - return fmt.Errorf(`jwsbb.Verify: failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key) - } - } else { - if err := keyconv.RSAPublicKey(&pubkey, key); err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to retrieve rsa.PublicKey out of %T: %w`, key, err) +func dispatchRSAVerify(key any, dsigAlg string, payload, signature []byte) error { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an RSA key + if _, ok := signer.Public().(*rsa.PublicKey); ok { + return dsig.Verify(signer, dsigAlg, payload, signature) } } - return VerifyRSA(pubkey, payload, signature, h, pss) -} - -func dispatchECDSAVerify(key any, alg string, payload, signature []byte) error { - h, err := ECDSAHashFuncFor(alg) - if err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to get hash function for %s: %w`, alg, err) + // Fall back to concrete key types + var pubkey *rsa.PublicKey + if err := keyconv.RSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. 
*rsa.PublicKey is required: %w`, key, err) } - pubkey, cs, isCryptoSigner, err := ecdsaGetVerifierKey(key) - if err != nil { - return fmt.Errorf(`jwsbb.Verify: %w`, err) - } - if isCryptoSigner { - return VerifyECDSACryptoSigner(cs, payload, signature, h) - } - return VerifyECDSA(pubkey, payload, signature, h) + return dsig.Verify(pubkey, dsigAlg, payload, signature) } -func dispatchEdDSAVerify(key any, _ string, payload, signature []byte) error { - var pubkey ed25519.PublicKey - signer, ok := key.(crypto.Signer) - if ok { - v := signer.Public() - pubkey, ok = v.(ed25519.PublicKey) - if !ok { - return fmt.Errorf(`jwsbb.Verify: expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v) - } - } else { - if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil { - return fmt.Errorf(`jwsbb.Verify: failed to retrieve ed25519.PublicKey out of %T: %w`, key, err) +func dispatchECDSAVerify(key any, dsigAlg string, payload, signature []byte) error { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an ECDSA key + if _, ok := signer.Public().(*ecdsa.PublicKey); ok { + return dsig.Verify(signer, dsigAlg, payload, signature) } } - return VerifyEdDSA(pubkey, payload, signature) + // Fall back to concrete key types + var pubkey *ecdsa.PublicKey + if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. *ecdsa.PublicKey is required: %w`, key, err) + } + + return dsig.Verify(pubkey, dsigAlg, payload, signature) } -func ecdsaGetVerifierKey(key any) (*ecdsa.PublicKey, crypto.Signer, bool, error) { - cs, isCryptoSigner := key.(crypto.Signer) - if isCryptoSigner { - switch key.(type) { - case ecdsa.PublicKey, *ecdsa.PublicKey: - // if it's ecdsa.PublicKey, it's more efficient to - // go through the non-crypto.Signer route. Set isCryptoSigner to false - isCryptoSigner = false +func dispatchEdDSAVerify(key any, dsigAlg string, payload, signature []byte) error { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an EdDSA key + if _, ok := signer.Public().(ed25519.PublicKey); ok { + return dsig.Verify(signer, dsigAlg, payload, signature) } } - if isCryptoSigner { - return nil, cs, true, nil - } - - var pubkey *ecdsa.PublicKey - if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil { - return nil, nil, false, fmt.Errorf(`invalid key type %T. ecdsa.PublicKey is required: %w`, key, err) + // Fall back to concrete key types + var pubkey ed25519.PublicKey + if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. 
ed25519.PublicKey is required: %w`, key, err) } - return pubkey, nil, false, nil + return dsig.Verify(pubkey, dsigAlg, payload, signature) } diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json new file mode 100644 index 000000000..0a37621d0 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json @@ -0,0 +1,4867 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" 
+ }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, 
+ { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + 
"type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": 
{ + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" 
+ } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": 
"function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + 
"result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + 
"decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": 
"set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + 
{ + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" 
+ } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/cmd/bench.go b/vendor/github.com/open-policy-agent/opa/cmd/bench.go index b0bb908a5..ca9d2aaf5 100644 --- a/vendor/github.com/open-policy-agent/opa/cmd/bench.go +++ b/vendor/github.com/open-policy-agent/opa/cmd/bench.go @@ -96,7 +96,7 @@ The optional "gobench" output format conforms to the Go Benchmark Data Format. cmd.SilenceErrors = true cmd.SilenceUsage = true - exit, err := benchMain(args, params, os.Stdout, &goBenchRunner{}) + exit, err := benchMain(args, params, os.Stdout, os.Stderr, &goBenchRunner{}) if err != nil { // NOTE: err should only be non-nil if a (highly unlikely) // presentation error occurs. 
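Editor's note on the hunk above and those below: the bench command now threads a dedicated stderr writer alongside the results writer, so pretty-printed errors no longer land on the same stream as benchmark output. A minimal sketch of the pattern, assuming hypothetical stand-ins (run for benchMain, renderError for renderBenchmarkError) rather than the vendored code:

package main

import (
	"fmt"
	"io"
	"os"
)

// renderError writes human-readable diagnostics to errW, never to the
// results writer, mirroring the renderBenchmarkError signature change.
func renderError(errW io.Writer, err error) {
	fmt.Fprintln(errW, "error:", err)
}

// run keeps results on w and errors on errW, so redirecting stdout
// (e.g. `opa bench ... > results.txt`) captures only results.
func run(w, errW io.Writer) int {
	if _, err := fmt.Fprintln(w, "BenchmarkEval 1000 1234 ns/op"); err != nil {
		renderError(errW, err)
		return 1
	}
	return 0
}

func main() {
	os.Exit(run(os.Stdout, os.Stderr))
}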
@@ -146,14 +146,14 @@ type benchRunner interface { run(ctx context.Context, ectx *evalContext, params benchmarkCommandParams, f func(context.Context, ...rego.EvalOption) error) (testing.BenchmarkResult, error) } -func benchMain(args []string, params benchmarkCommandParams, w io.Writer, r benchRunner) (int, error) { +func benchMain(args []string, params benchmarkCommandParams, w io.Writer, stderr io.Writer, r benchRunner) (int, error) { ctx := context.Background() if params.e2e { err := benchE2E(ctx, args, params, w) if err != nil { - errRender := renderBenchmarkError(params, err, w) + errRender := renderBenchmarkError(params, err, w, stderr) return 1, errRender } return 0, nil @@ -161,7 +161,7 @@ func benchMain(args []string, params benchmarkCommandParams, w io.Writer, r benc ectx, err := setupEval(args, params.evalCommandParams) if err != nil { - errRender := renderBenchmarkError(params, err, w) + errRender := renderBenchmarkError(params, err, w, stderr) return 1, errRender } @@ -182,7 +182,7 @@ func benchMain(args []string, params benchmarkCommandParams, w io.Writer, r benc // Take the eval context and prepare anything else we possible can before benchmarking the evaluation pq, err := rg.PrepareForEval(ctx) if err != nil { - errRender := renderBenchmarkError(params, err, w) + errRender := renderBenchmarkError(params, err, w, stderr) return 1, errRender } @@ -199,7 +199,7 @@ func benchMain(args []string, params benchmarkCommandParams, w io.Writer, r benc // As with normal evaluation, prepare as much as possible up front. pq, err := rg.PrepareForPartial(ctx) if err != nil { - errRender := renderBenchmarkError(params, err, w) + errRender := renderBenchmarkError(params, err, w, stderr) return 1, errRender } @@ -218,7 +218,7 @@ func benchMain(args []string, params benchmarkCommandParams, w io.Writer, r benc for range params.count { br, err := r.run(ctx, ectx, params, benchFunc) if err != nil { - errRender := renderBenchmarkError(params, err, w) + errRender := renderBenchmarkError(params, err, w, stderr) return 1, errRender } renderBenchmarkResult(params, br, w) @@ -630,7 +630,7 @@ func renderBenchmarkResult(params benchmarkCommandParams, br testing.BenchmarkRe } } -func renderBenchmarkError(params benchmarkCommandParams, err error, w io.Writer) error { +func renderBenchmarkError(params benchmarkCommandParams, err error, w io.Writer, stderr io.Writer) error { o := presentation.Output{ Errors: presentation.NewOutputErrors(err), } @@ -639,7 +639,7 @@ func renderBenchmarkError(params benchmarkCommandParams, err error, w io.Writer) case formats.JSON: return presentation.JSON(w, o) default: - return presentation.Pretty(w, o) + return presentation.Pretty(w, stderr, o) } } diff --git a/vendor/github.com/open-policy-agent/opa/cmd/eval.go b/vendor/github.com/open-policy-agent/opa/cmd/eval.go index 5f9f93c3c..5b70495d2 100644 --- a/vendor/github.com/open-policy-agent/opa/cmd/eval.go +++ b/vendor/github.com/open-policy-agent/opa/cmd/eval.go @@ -310,7 +310,7 @@ access. cmd.SilenceErrors = true cmd.SilenceUsage = true - defined, err := eval(args, params, os.Stdout) + defined, err := eval(args, params, os.Stdout, os.Stderr) if err != nil { if _, ok := err.(regoError); !ok { fmt.Fprintln(os.Stderr, err) @@ -372,7 +372,7 @@ access. 
root.AddCommand(evalCommand) } -func eval(args []string, params evalCommandParams, w io.Writer) (bool, error) { +func eval(args []string, params evalCommandParams, w io.Writer, stderr io.Writer) (bool, error) { ctx := context.Background() if params.timeout != 0 { var cancel func() @@ -429,20 +429,20 @@ func eval(args []string, params evalCommandParams, w io.Writer) (bool, error) { switch ectx.params.outputFormat.String() { case formats.Bindings: - err = pr.Bindings(w, result) + err = pr.Bindings(w, stderr, result) case formats.Values: - err = pr.Values(w, result) + err = pr.Values(w, stderr, result) case formats.Pretty: - err = pr.PrettyWithOptions(w, result, pr.PrettyOptions{ + err = pr.PrettyWithOptions(w, stderr, result, pr.PrettyOptions{ TraceOpts: topdown.PrettyTraceOptions{ Locations: true, ExprVariables: ectx.params.traceVarValues, }, }) case formats.Source: - err = pr.Source(w, result) + err = pr.Source(w, stderr, result) case formats.Raw: - err = pr.Raw(w, result) + err = pr.Raw(w, stderr, result) case formats.Discard: err = pr.Discard(w, result) default: diff --git a/vendor/github.com/open-policy-agent/opa/internal/compile/checks.go b/vendor/github.com/open-policy-agent/opa/internal/compile/checks.go new file mode 100644 index 000000000..ed50cffb6 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/compile/checks.go @@ -0,0 +1,266 @@ +// Copyright 2025 The OPA Authors +// SPDX-License-Identifier: Apache-2.0 + +package compile + +import ( + "cmp" + "fmt" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/rego" +) + +const code = "pe_fragment_error" // TODO(sr): this is preliminary + +type Results struct { + errs []*ast.Error +} + +func (r *Results) ASTErrors() []*ast.Error { + if r == nil { + return nil + } + return r.errs +} + +type checker struct { + constraints Constraints + shortUnknowns Set[string] + res *Results +} + +func (c *checker) Results() *Results { + return c.res +} + +// Check performs a set of checks on the given partial queries and support modules. +// The constraints are used to determine which features are allowed in the partial queries. +// The shorts are used to determine which short names are allowed, e.g. `input.foo` is +// allowed if it's mapped to some table column. +func Check(pq *rego.PartialQueries, constraints Constraints, shorts Set[string]) *Results { + check := checker{ + constraints: constraints, + shortUnknowns: shorts, + res: &Results{}, + } + for i := range pq.Queries { + check.Query(pq.Queries[i], pq.Support) + } + // NOTE(sr): So far, we've gotten better error locations from the refs into + // support modules. The support modules themselves are surprisingly useless + // for that. 
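+ // The queries themselves are checked expression by expression via the
+ // queryChecks table below; the support modules are only consulted to
+ // enrich error messages for refs into data.partial (see findDetails).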
+ // for i := range pq.Support { + // checkSupport(pq.Support[i], &res) + // } + return check.Results() +} + +func (c *checker) Query(q ast.Body, sup []*ast.Module) { + for j := range q { + for i := range queryChecks { + if err := queryChecks[i](c, q[j], sup); err != nil { + c.res.errs = append(c.res.errs, err) + } + } + } +} + +var queryChecks = [...]func(*checker, *ast.Expr, []*ast.Module) *ast.Error{ + checkCall, + checkBuiltins, +} + +var partialPrefix = ast.MustParseRef("data.partial") + +func checkCall(c *checker, e *ast.Expr, sup []*ast.Module) *ast.Error { + switch { + case e.Negated: + if !e.IsCall() { // IsCall gives us comparisons etc, and rules out naked data refs + return err(e.Loc(), "\"not\" not permitted") + } + err0 := c.constraints.AssertFeature("not") + if err0 == nil { + break // OK + } + return err(e.Loc(), "\"not\" not permitted: %s", err0.Error()) + case e.IsCall(): // OK + case e.IsEvery(): + return err(e.Loc(), "\"every\" not permitted") + case len(e.With) > 0: + return err(e.Loc(), "\"with\" not permitted") + default: + if t, ok := e.Terms.(*ast.Term); ok { + if ref, ok := t.Value.(ast.Ref); ok && ref.HasPrefix(partialPrefix) { + loc := ref[len(ref)-1].Loc() + return err(loc, "%s", findDetails(ref, sup)) + } + + if ref, ok := t.Value.(ast.Ref); ok && ref.HasPrefix(ast.DefaultRootRef) { + // TODO(sr): point to rule with else -- but we don't have the full rego yet + return withDetails(err(e.Loc(), "invalid data reference \"%v\"", e), + fmt.Sprintf("has rule \"%v\" an `else`?", ref), + ) + } + } + return withDetails(err(e.Loc(), "invalid statement \"%v\"", e), + fmt.Sprintf("try `%v != false`", e), + ) + } + return nil +} + +// some builtins need their names replaced for nicer errors +var replacements = map[string]string{ + "internal.member_2": "in", +} + +func checkBuiltins(c *checker, e *ast.Expr, _ []*ast.Module) *ast.Error { + if len(e.With) > 0 { // Ignore expression, we'll already have recorded errors through checkCalls. + return nil + } + op := e.OperatorTerm() + if op == nil { + return nil + } + loc := cmp.Or(op.Loc(), e.Loc()) + ref := op.Value.(ast.Ref) + op0 := ref.String() + + unknownMustBeFirst := false + twoRefsOK := false + + switch { + case op0 == ast.Equality.Name || + op0 == ast.NotEqual.Name || + op0 == ast.LessThan.Name || + op0 == ast.LessThanEq.Name || + op0 == ast.GreaterThan.Name || + op0 == ast.GreaterThanEq.Name: + twoRefsOK = true + case op0 == ast.StartsWith.Name || + op0 == ast.EndsWith.Name || + op0 == ast.Contains.Name || + op0 == ast.Member.Name: + unknownMustBeFirst = true + + // Below there are only error cases + case op0 == ast.MemberWithKey.Name: + return err(loc, "invalid use of \"... 
in ...\"") + case ref.HasPrefix(ast.DefaultRootRef): + // TODO(sr): point to function with else -- but we don't have the full rego yet + return withDetails(err(e.Loc(), "invalid data reference \"%v\"", e), + fmt.Sprintf("has function \"%v(...)\" an `else`?", ref), + ) + default: + return err(loc, "invalid builtin `%v`", op) + } + + // Also check that our target+variant allows this builtin + if err0 := c.constraints.AssertBuiltin(op0); err0 != nil { + return err(loc, "invalid builtin `%v`: %s", + cmp.Or(replacements[op0], op0), err0.Error()) + } + + // all our allowed builtins have two operands + for i := range 2 { + if err := checkOperand(c, op, e.Operand(i)); err != nil { + return err + } + } + + // check that field-ref comparisons are supported by the targets: + unknownRefs := 0 + for i := range 2 { + if _, ok := e.Operand(i).Value.(ast.Ref); ok { + unknownRefs++ + } + } + if unknownRefs == 2 { + if err0 := c.constraints.AssertFeature("field-ref"); err0 != nil { + return err(loc, "reference to field: %s", err0.Error()) + } + } + + switch { + case unknownMustBeFirst: + if _, ok := e.Operand(0).Value.(ast.Ref); !ok { + return err(loc, "rhs of %v must be known", op) + } + default: // lhs or rhs needs to be ground scalar, or, if twoRefsOK is true, unknown input refs + // TODO(sr): collections might work, too, let's fix this later + found := false + for i := range 2 { + if ast.IsScalar(e.Operand(i).Value) { + found = true + } + } + if !found && !(twoRefsOK && unknownRefs == 2) { // nolint:staticcheck + return err(loc, "both rhs and lhs non-scalar/non-ground") + } + } + return nil +} + +func checkOperand(c *checker, op, t *ast.Term) *ast.Error { + if t == nil { + return err(op.Loc(), "%v: missing operand", op) + } + loc := op.Loc() + switch v := t.Value.(type) { + case ast.Call: + if loc == nil { + loc = v[0].Loc() + } + return err(loc, "%v: nested call operand: %v", op, v) + case ast.Ref: + if v.HasPrefix(ast.InputRootRef) { + if len(v) == 3 { + return nil + } + if len(v) == 2 && c.shortUnknowns.Contains(string(v[1].Value.(ast.String))) { + return nil + } + } + if loc == nil { + loc = t.Loc() + } + return err(loc, "%v: invalid ref operand: %v", op, v) + } + return nil +} + +func err(loc *ast.Location, f string, vs ...any) *ast.Error { + return ast.NewError(code, loc, f, vs...) 
+} + +type Details struct { + Extra string `json:"details"` +} + +func (d *Details) Lines() []string { + return []string{d.Extra} +} + +func withDetails(err *ast.Error, dets string) *ast.Error { + err.Details = &Details{Extra: dets} + return err +} + +func findDetails(partialRef ast.Ref, sup []*ast.Module) string { + for i := range sup { + count := 0 + for j := range sup[i].Rules { + if r := sup[i].Rules[j]; r.Ref().Equal(partialRef) { + count++ + switch { + case r.Default: + return fmt.Sprintf("use of default rule in %v", ast.DefaultRootRef.Concat(r.Ref()[2:])) + case count > 1: + return fmt.Sprintf("use of multi-value rule in %v", ast.DefaultRootRef.Concat(r.Ref()[2:])) + } + } + } + } + return "" +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/compile/compile.go b/vendor/github.com/open-policy-agent/opa/internal/compile/compile.go new file mode 100644 index 000000000..90148bed5 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/compile/compile.go @@ -0,0 +1,172 @@ +// Copyright 2025 The OPA Authors +// SPDX-License-Identifier: Apache-2.0 + +package compile + +import ( + "encoding/json" + "fmt" + "slices" + + "github.com/open-policy-agent/opa/internal/levenshtein" + "github.com/open-policy-agent/opa/internal/ucast" + "github.com/open-policy-agent/opa/v1/ast" +) + +const ( + invalidUnknownCode = "invalid_unknown" +) + +type UCASTNode struct { + internal *ucast.UCASTNode +} + +func (u *UCASTNode) Map() map[string]any { + if u.internal == nil { // unconditional YES + return map[string]any{} + } + + // TODO(sr): find a better way + ret := map[string]any{} + bs, err := json.Marshal(u.internal) + if err != nil { + panic(err) + } + if err := json.Unmarshal(bs, &ret); err != nil { + panic(err) + } + return ret +} + +func QueriesToUCAST(queries []ast.Body, mappings map[string]any) *UCASTNode { + return &UCASTNode{internal: BodiesToUCAST(queries, &Opts{Translations: mappings})} +} + +func QueriesToSQL(queries []ast.Body, mappings map[string]any, dialect string) (string, error) { + sql := "" + ucast := BodiesToUCAST(queries, &Opts{Translations: mappings}) + if ucast != nil { // ucast == nil means unconditional YES, for which we'll keep `sql = ""` + sql0, err := ucast.AsSQL(dialect) + if err != nil { + return "", err + } + sql = sql0 + } + + return sql, nil +} + +func ExtractUnknownsFromAnnotations(comp *ast.Compiler, ref ast.Ref) ([]ast.Ref, []*ast.Error) { + // find ast.Rule for ref + rules := comp.GetRulesExact(ref) + if len(rules) == 0 { + return nil, nil + } + rule := rules[0] // rule scope doesn't make sense here, so it doesn't matter which rule we use + return unknownsFromAnnotationsSet(comp.GetAnnotationSet(), rule) +} + +func unknownsFromAnnotationsSet(as *ast.AnnotationSet, rule *ast.Rule) ([]ast.Ref, []*ast.Error) { + if as == nil { + return nil, nil + } + var unknowns []ast.Ref + var errs []*ast.Error + + for _, ar := range as.Chain(rule) { + ann := ar.Annotations + if ann == nil || ann.Compile == nil { + continue + } + unkArray := ann.Compile.Unknowns + for _, ref := range unkArray { + if ref.HasPrefix(ast.DefaultRootRef) || ref.HasPrefix(ast.InputRootRef) { + unknowns = append(unknowns, ref) + } else { + errs = append(errs, ast.NewError(invalidUnknownCode, ann.Loc(), "unknowns must be prefixed with `input` or `data`: %v", ref)) + } + } + } + + return unknowns, errs +} + +func ExtractMaskRuleRefFromAnnotations(comp *ast.Compiler, ref ast.Ref) (ast.Ref, 
*ast.Error) { + // find ast.Rule for ref + rules := comp.GetRulesExact(ref) + if len(rules) == 0 { + return nil, nil + } + rule := rules[0] // rule scope doesn't make sense here, so it doesn't matter which rule we use + return maskRuleFromAnnotationsSet(comp.GetAnnotationSet(), rule) +} + +func maskRuleFromAnnotationsSet(as *ast.AnnotationSet, rule *ast.Rule) (ast.Ref, *ast.Error) { + if as == nil { + return nil, nil + } + + for _, ar := range as.Chain(rule) { + ann := ar.Annotations + if ann == nil || ann.Compile == nil { + continue + } + if maskRule := ann.Compile.MaskRule; maskRule != nil { + if !maskRule.HasPrefix(ast.DefaultRootRef) { + // If the mask_rule is not a data ref, add package prefix. + maskRule = rule.Module.Package.Path.Extend(maskRule) + } + return maskRule, nil + } + } + + return nil, nil // No mask rule found. +} + +func ShortsFromMappings(mappings map[string]any) Set[string] { + shorts := NewSet[string]() + for _, mapping := range mappings { + m, ok := mapping.(map[string]any) + if !ok { + continue + } + for n, nmap := range m { + m, ok := nmap.(map[string]any) + if !ok { + continue + } + if _, ok := m["$table"]; ok { + shorts = shorts.Add(n) + } + } + } + return shorts +} + +// Returns a list of similar rule names that might match the input string. +// Warning(philip): This is expensive, as the cost grows linearly with the +// number of rules present on the compiler. It should be used only for +// error messages. +func FuzzyRuleNameMatchHint(comp *ast.Compiler, input string) string { + rules := comp.GetRules(ast.Ref{ast.DefaultRootDocument}) + ruleNames := make([]string, 0, len(rules)) + for _, rule := range rules { + if rule.Default { + continue + } + ruleNames = append(ruleNames, rule.Module.Package.Path.String()+"."+rule.Head.Name.String()) + } + closest := levenshtein.ClosestStrings(65536, input, slices.Values(ruleNames)) + proposals := slices.Compact(closest) + + var msg string + switch len(proposals) { + case 0: + return "" + case 1: + msg = fmt.Sprintf("%s undefined, did you mean %s?", input, proposals[0]) + default: + msg = fmt.Sprintf("%s undefined, did you mean one of %v?", input, proposals) + } + return msg +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/compile/constraints.go b/vendor/github.com/open-policy-agent/opa/internal/compile/constraints.go new file mode 100644 index 000000000..f21e6b344 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/compile/constraints.go @@ -0,0 +1,226 @@ +// Copyright 2025 The OPA Authors +// SPDX-License-Identifier: Apache-2.0 + +package compile + +import ( + "errors" + "fmt" + "maps" + "strings" +) + +type Set[T comparable] map[T]struct{} + +func NewSet[T comparable](strs ...T) Set[T] { + return make(Set[T]).Add(strs...) 
+} + +func (s Set[T]) Clone() Set[T] { + return maps.Clone(s) +} + +func (s Set[T]) Add(strs ...T) Set[T] { + for i := range strs { + s[strs[i]] = struct{}{} + } + return s +} + +// Contains checks if a string exists in the set +func (s Set[T]) Contains(str T) bool { + _, exists := s[str] + return exists +} + +// Intersection returns a new set containing elements present in both sets +func (s Set[T]) Intersection(other Set[T]) Set[T] { + result := NewSet[T]() + // Iterate through the smaller set for better performance + if len(s) > len(other) { + s, other = other, s + } + for elem := range s { + if other.Contains(elem) { + result.Add(elem) + } + } + return result +} + +// Constraint lets us limit the builtins and features that are allowed in a +// translation. There are hardcoded sets of supported builtins. The constraints +// become effective during the post-PE analysis (compile.Check()). +type Constraint struct { + Target string + Variant string + Builtins Set[string] + Features Set[string] +} + +type Constraints interface { + Builtin(string) bool + AssertBuiltin(string) error + Supports(string) bool + AssertFeature(string) error +} + +// NewConstraints returns a new Constraint object based on the type +// requested, ucast or sql. +func NewConstraints(typ, variant string) (*Constraint, error) { + c := Constraint{Target: strings.ToUpper(typ), Variant: variant, Features: NewSet[string]()} + switch typ { + case "sql": + switch v := strings.ToLower(variant); v { + case "sqlite": + c.Builtins = sqlSQLiteBuiltins + case "mysql", "postgresql", "sqlserver", "sqlite-internal": + c.Builtins = sqlBuiltins + default: + return nil, fmt.Errorf("unsupported variant for %s: %s", typ, variant) + } + c.Features.Add("not", "field-ref") + case "ucast": + switch v := strings.ToLower(variant); v { + case "all": + c.Variant = v + c.Features.Add("not", "field-ref") + c.Builtins = allBuiltins + case "prisma": + c.Variant = v + c.Features.Add("not") + c.Builtins = allBuiltins + case "linq": + c.Variant = "LINQ" // normalize spelling + c.Builtins = ucastLINQBuiltins + default: + c.Variant = "" + c.Builtins = ucastBuiltins + } + default: + return nil, fmt.Errorf("unknown target/dialect combination: %s/%s", typ, variant) + } + + return &c, nil +} + +// Builtin returns true if the builtin is supported by the constraint. +func (c *Constraint) Builtin(x string) bool { + return c.Builtins.Contains(x) +} + +func (c *Constraint) AssertBuiltin(x string) error { + if !c.Builtin(x) { + return fmt.Errorf("unsupported for %s", c) + } + return nil +} + +// Supports allows us to encode more fluent constraints, like support for "not". +// It returns true if the feature is supported by the constraint. +func (c *Constraint) Supports(x string) bool { + return c.Features.Contains(x) +} + +func (c *Constraint) AssertFeature(x string) error { + if !c.Supports(x) { + return fmt.Errorf("unsupported feature %q for %s", x, c) + } + return nil +} + +var _ fmt.Stringer = (*Constraint)(nil) + +func (c *Constraint) String() string { + if c.Variant == "" { + return c.Target + } + return fmt.Sprintf("%s (%s)", c.Target, c.Variant) +} + +type ConstraintSet struct { + Constraints []*Constraint +} + +func NewConstraintSet(cs ...*Constraint) *ConstraintSet { + return &ConstraintSet{Constraints: cs} +} + +// Builtin returns true if all the constraints in the set support the builtin.
+func (cs *ConstraintSet) Builtin(x string) bool { + for i := range cs.Constraints { + if !cs.Constraints[i].Builtin(x) { + return false + } + } + return true +} + +func (cs *ConstraintSet) AssertBuiltin(x string) error { + var err error + for i := range cs.Constraints { + if e := cs.Constraints[i].AssertBuiltin(x); e != nil { + err = errors.Join(err, e) + } + } + return err +} + +// Supports returns true if the feature is supported by all constraints. +func (cs *ConstraintSet) Supports(x string) bool { + for i := range cs.Constraints { + if !cs.Constraints[i].Supports(x) { + return false + } + } + return true +} + +// AssertFeature returns an error if the feature is not supported by any +// of the constraints. The error contains the offending target/dialect pairs. +func (cs *ConstraintSet) AssertFeature(x string) error { + var err error + for i := range cs.Constraints { + if e := cs.Constraints[i].AssertFeature(x); e != nil { + err = errors.Join(err, e) + } + } + return err +} + +var _ fmt.Stringer = (*ConstraintSet)(nil) + +func (cs *ConstraintSet) String() string { + result := make([]string, 0, len(cs.Constraints)+1) + for i := range cs.Constraints { + result = append(result, cs.Constraints[i].String()) + } + return "multi-constraint: " + strings.Join(result, ", ") +} + +var ( + sqlBuiltins = allBuiltins + + // sqlite doesn't support startswith/endswith/contains + sqlSQLiteBuiltins = ucastBuiltins.Clone().Add("internal.member_2") + + ucastBuiltins = NewSet( + "eq", + "neq", + "lt", + "lte", + "gt", + "gte", + ) + allBuiltins = ucastBuiltins.Clone().Add( + "internal.member_2", + // "nin", // TODO: deal with NOT IN + "startswith", + "endswith", + "contains", + ) + ucastLINQBuiltins = ucastBuiltins.Clone().Add( + "internal.member_2", + // "nin", // TODO: deal with NOT IN + ) +) diff --git a/vendor/github.com/open-policy-agent/opa/internal/compile/ucast.go b/vendor/github.com/open-policy-agent/opa/internal/compile/ucast.go new file mode 100644 index 000000000..8a280b4cb --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/compile/ucast.go @@ -0,0 +1,232 @@ +// Copyright 2025 The OPA Authors +// SPDX-License-Identifier: Apache-2.0 + +package compile + +import ( + "cmp" + "fmt" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" + + "github.com/open-policy-agent/opa/internal/ucast" +) + +type Opts struct { + Translations map[string]any +} + +func BodiesToUCAST(bs []ast.Body, opts *Opts) *ucast.UCASTNode { + if len(bs) == 0 { + return nil + } + + // If there's only one body, convert it directly + if len(bs) == 1 { + return bodyToUCAST(bs[0], opts) + } + + // Multiple bodies are alternative solutions and are combined with OR + nodes := make([]ucast.UCASTNode, len(bs)) + for i := range bs { + u := bodyToUCAST(bs[i], opts) + if u == nil { + return nil + } + nodes[i] = *u + } + return &ucast.UCASTNode{ + Type: "compound", + Op: "or", + Value: nodes, + } +} + +func bodyToUCAST(body ast.Body, opts *Opts) *ucast.UCASTNode { + if len(body) == 0 { + return nil + } + + // If there's only one expression, convert it directly + if len(body) == 1 { + return exprToUCAST(body[0], opts) + } + + // Multiple expressions are combined with AND + nodes := make([]ucast.UCASTNode, len(body)) + for i, expr := range body { + u := exprToUCAST(expr, opts) + if u == nil { + return nil + } + nodes[i] = *u + } + return &ucast.UCASTNode{ + Type: "compound", + Op: "and", + Value: nodes, + } +} + +func exprToUCAST(expr *ast.Expr, opts *Opts)
*ucast.UCASTNode { + if expr == nil || !expr.IsCall() { + return nil + } + + ref, flip := refFromCall(expr) + return callToNode(expr, ref, flip, opts) +} + +// refToField drops the first part of ast.Ref, and joins the rest with "." +func refToField(r ast.Ref, opts *Opts) (string, error) { + parts := make([]string, len(r)-1) + for i := range len(r) - 1 { + switch t := r[i+1].Value.(type) { + case ast.Var: + parts[i] = string(t) + case ast.String: + parts[i] = string(t) + default: + return "", fmt.Errorf("unexpected type in ref %v: %T (%[2]v)", r, t) + } + } + return translateField(strings.Join(parts, "."), opts.Translations), nil +} + +func toFieldNode(op string, r ast.Ref, v ast.Value, opts *Opts, refOK bool) *ucast.UCASTNode { + var value any + switch v := v.(type) { + case ast.Ref: + if refOK { + f, err := refToField(v, opts) + if err != nil { + return nil + } + value = ucast.FieldRef{ + Field: f, + } + } + default: + var err error + value, err = ast.ValueToInterface(v, nil) + if err != nil { + return nil + } + } + + f, err := refToField(r, opts) + if err != nil { + return nil + } + if value == nil { + value = ucast.Null{} + } + return &ucast.UCASTNode{ + Type: "field", + Op: op, + Field: f, + Value: value, + } +} + +// reversed maps each comparison operator to its mirror image, used when the +// operands are swapped: e.g. `7 < input.x` becomes `input.x > 7`. +var reversed = map[string]string{ + "lt": "gt", + "lte": "gte", + "gt": "lt", + "gte": "lte", +} + +// callToNode converts a call expression to a UCASTNode, and flips the arguments +// and the comparison operator if needed. +func callToNode(e *ast.Expr, f ast.Ref, flip bool, opts *Opts) *ucast.UCASTNode { + ref := e.OperatorTerm().Value.(ast.Ref) + op := ref.String() + + refOK := false + switch op { + case ast.NotEqual.Name: + refOK = true + op = "ne" + case ast.Equality.Name, + ast.LessThan.Name, + ast.LessThanEq.Name, + ast.GreaterThan.Name, + ast.GreaterThanEq.Name: + refOK = true + case ast.StartsWith.Name: + case ast.EndsWith.Name: + case ast.Contains.Name: + case ast.Member.Name: + op = "in" + default: + return nil + } + + i := 1 + if flip { + i = 0 + op = cmp.Or(reversed[op], op) // optionally replace operator + } + + fn := toFieldNode(op, f, e.Operand(i).Value, opts, refOK) + if !e.Negated { + return fn + } + + value := make([]ucast.UCASTNode, 1) + value[0] = *fn + return &ucast.UCASTNode{ + Type: "compound", + Op: "not", + Value: value, + } +} + +func refFromCall(e *ast.Expr) (ast.Ref, bool) { + leftRef, ok := e.Operand(0).Value.(ast.Ref) + if ok { // lhs is unknown + return leftRef, false + } + return e.Operand(1).Value.(ast.Ref), true // rhs is unknown +} + +func translateField(field string, translations map[string]any) string { + var outTable, outColumn string + if translations == nil { + return field + } + before, after, found := strings.Cut(field, ".") + outTable = before + outColumn = after + if tableMapping := translations[before]; tableMapping != nil { + if tableMapping, ok := tableMapping.(map[string]any); ok { + // See if there's a $table ref for the short unknown, and remap. + if tableName, ok := tableMapping["$table"]; ok { + outTable = tableName.(string) + // swap, make believe we picked up '.name', not 'name' + before, after, found = outTable, before, true + outColumn = after + } + } + } + // Is there a translation available for the table name? + if tableMapping, ok := translations[before]; ok { + if tableMapping, ok := tableMapping.(map[string]any); ok { + // See if there's a mapping for the table name, and remap.
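+ // Illustrative example: with translations {"name": {"$table": "users"},
+ // "users": {"$self": "app_users", "name": "user_name"}}, the short field
+ // "name" first resolves its table via $table, after which $self and the
+ // column entry rewrite the result to "app_users.user_name".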
+ if tableName, ok := tableMapping["$self"]; ok { + outTable = tableName.(string) // XXX: be more cautious about the type + } + // If we have a column name, try remapping it. + if found { + if columnName, ok := tableMapping[after]; ok { + outColumn = columnName.(string) + } + } + } + } + if found { + return outTable + "." + outColumn + } + return outTable +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/levenshtein/levenshtein.go b/vendor/github.com/open-policy-agent/opa/internal/levenshtein/levenshtein.go new file mode 100644 index 000000000..63b93aa6d --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/levenshtein/levenshtein.go @@ -0,0 +1,30 @@ +// Copyright 2025 The OPA Authors +// SPDX-License-Identifier: Apache-2.0 + +package levenshtein + +import ( + "iter" + "slices" + + "github.com/agnivade/levenshtein" +) + +func ClosestStrings(minDistance int, a string, candidates iter.Seq[string]) []string { + closestStrings := []string{} + for c := range candidates { + levDist := levenshtein.ComputeDistance(a, c) + switch { + case levDist < minDistance: + closestStrings = []string{c} + minDistance = levDist + case levDist == minDistance: + closestStrings = append(closestStrings, c) + minDistance = levDist + default: + continue + } + } + slices.Sort(closestStrings) + return closestStrings +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/presentation/presentation.go b/vendor/github.com/open-policy-agent/opa/internal/presentation/presentation.go index 4329a3dcd..062c4e823 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/presentation/presentation.go +++ b/vendor/github.com/open-policy-agent/opa/internal/presentation/presentation.go @@ -11,7 +11,6 @@ import ( "errors" "fmt" "io" - "os" "sort" "strconv" "strings" @@ -248,10 +247,10 @@ func JSON(w io.Writer, x any) error { return encoder.Encode(x) } -// Bindings prints the bindings from r to w. -func Bindings(w io.Writer, r Output) error { +// Bindings prints the bindings from r to w, errors are written to errW +func Bindings(w io.Writer, errW io.Writer, r Output) error { if r.Errors != nil { - return prettyError(w, r.Errors) + return prettyError(errW, r.Errors) } for _, rs := range r.Result { if err := JSON(w, rs.Bindings); err != nil { @@ -261,26 +260,26 @@ func Bindings(w io.Writer, r Output) error { return nil } -// Values prints the values from r to w. -func Values(w io.Writer, r Output) error { +// Values prints the values from r to w, errors are written to errW +func Values(w io.Writer, errW io.Writer, r Output) error { if r.Errors != nil { - return prettyError(w, r.Errors) + return prettyError(errW, r.Errors) } for _, rs := range r.Result { line := make([]any, len(rs.Expressions)) for i := range line { line[i] = rs.Expressions[i].Value } - if err := JSON(os.Stdout, line); err != nil { + if err := JSON(w, line); err != nil { return err } } return nil } -// Pretty prints all of r to w in a human-readable format. 
-func Pretty(w io.Writer, r Output) error { - return PrettyWithOptions(w, r, PrettyOptions{ +// Pretty prints all of r to w in a human-readable format, errors are written to errW +func Pretty(w io.Writer, errW io.Writer, r Output) error { + return PrettyWithOptions(w, errW, r, PrettyOptions{ TraceOpts: topdown.PrettyTraceOptions{ Locations: true, }, @@ -291,15 +290,15 @@ type PrettyOptions struct { TraceOpts topdown.PrettyTraceOptions } -// PrettyWithOptions prints all of r to w in a human-readable format. -func PrettyWithOptions(w io.Writer, r Output, opts PrettyOptions) error { +// PrettyWithOptions prints all of r to w in a human-readable format, errors are written to errW +func PrettyWithOptions(w io.Writer, errW io.Writer, r Output, opts PrettyOptions) error { if len(r.Explanation) > 0 { if err := prettyExplanation(w, r.Explanation, opts.TraceOpts); err != nil { return err } } if r.Errors != nil { - if err := prettyError(w, r.Errors); err != nil { + if err := prettyError(errW, r.Errors); err != nil { return err } } else if r.undefined() { @@ -342,11 +341,11 @@ func PrettyWithOptions(w io.Writer, r Output, opts PrettyOptions) error { } // Source prints partial evaluation results in r to w in a source file friendly -// format. -func Source(w io.Writer, r Output) error { +// format, errors are written to errW +func Source(w io.Writer, errW io.Writer, r Output) error { if r.Errors != nil { - return prettyError(w, r.Errors) + return prettyError(errW, r.Errors) } for i := range r.Partial.Queries { @@ -370,13 +369,13 @@ func Source(w io.Writer, r Output) error { return nil } -// Raw prints the values from r to w. Each result is written on a separate +// Raw prints the values from r to w, errors are written to errW. Each result is written on a separate // line, and the expressions are separated by spaces. If the values are // strings, they are written directly rather than formatted as compact // JSON strings. This output format makes OPA useful in a scripting context. -func Raw(w io.Writer, r Output) error { +func Raw(w io.Writer, errW io.Writer, r Output) error { if r.Errors != nil { - return prettyError(w, r.Errors) + return prettyError(errW, r.Errors) } for _, rs := range r.Result { diff --git a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go index 4f93a4f12..530deadb9 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go +++ b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go @@ -176,7 +176,7 @@ func LoadPathsForRegoVersion(regoVersion ast.RegoVersion, } } - if len(nonBundlePaths) == 0 { + if asBundle { return &result, nil } diff --git a/vendor/github.com/open-policy-agent/opa/internal/ucast/ucast.go b/vendor/github.com/open-policy-agent/opa/internal/ucast/ucast.go new file mode 100644 index 000000000..9e24097b5 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/internal/ucast/ucast.go @@ -0,0 +1,246 @@ +// Copyright 2025 The OPA Authors +// SPDX-License-Identifier: Apache-2.0 + +package ucast + +import ( + "errors" + "fmt" + "slices" + "strings" + + "github.com/huandu/go-sqlbuilder" +) + +// The "union" structure for incoming UCAST trees. 
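+// For illustration (field names taken from the struct tags below), a single
+// field condition marshals as:
+//
+//	{"type": "field", "operator": "eq", "field": "users.name", "value": "alice"}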
+type UCASTNode struct {
+	Type  string `json:"type"`
+	Op    string `json:"operator"`
+	Field string `json:"field,omitempty"`
+	Value any    `json:"value,omitempty"`
+}
+
+// FieldRef can be used in UCASTNode.Value to reference another field, i.e., a database column.
+type FieldRef struct {
+	Field string `json:"field"`
+}
+
+// Null represents a NULL value in a SQL query. We need our own type
+// to control both the JSON marshalling and the SQL generation.
+type Null struct{}
+
+func (Null) MarshalJSON() ([]byte, error) {
+	return []byte("null"), nil
+}
+
+var (
+	compoundOps = []string{"and", "or", "not"}
+	documentOps = []string{"exists"}
+	fieldOps    = []string{"eq", "ne", "gt", "lt", "ge", "le", "gte", "lte", "in"} // "nin"
+)
+
+func dialectToFlavor(dialect string) sqlbuilder.Flavor {
+	switch dialect {
+	case "mysql":
+		return sqlbuilder.MySQL
+	case "sqlite", "sqlite-internal":
+		return sqlbuilder.SQLite
+	case "postgres", "postgresql":
+		return sqlbuilder.PostgreSQL
+	case "sqlserver":
+		return sqlbuilder.SQLServer
+	default:
+		return sqlbuilder.SQLite
+	}
+}
+
+func interpolateByDialect(dialect string, s string, args []any) (string, error) {
+	return dialectToFlavor(dialect).Interpolate(s, args)
+}
+
+func (u *UCASTNode) AsSQL(dialect string) (string, error) {
+	// Build up the SQL expression using the UCASTNode tree.
+	cond := sqlbuilder.NewCond()
+	where := sqlbuilder.NewWhereClause()
+	conditionStr, err := u.asSQL(cond, dialect)
+	if err != nil {
+		return "", err
+	}
+	where.AddWhereExpr(cond.Args, conditionStr)
+	s, args := where.BuildWithFlavor(dialectToFlavor(dialect))
+	// Interpolate the arguments into the SQL string.
+	return interpolateByDialect(dialect, s, args)
+}
+
+// Uses our SQL generator library to build up a larger SQL expression.
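+// As a hedged usage sketch (the exact SQL depends on the sqlbuilder flavor),
+// a small tree of field conditions combined with "and" interpolates via the
+// exported AsSQL roughly to WHERE users.active = TRUE AND users.age > 21:
+//
+//	n := &UCASTNode{Type: "compound", Op: "and", Value: []UCASTNode{
+//		{Type: "field", Op: "eq", Field: "users.active", Value: true},
+//		{Type: "field", Op: "gt", Field: "users.age", Value: 21},
+//	}}
+//	sql, err := n.AsSQL("postgres")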
+func (u *UCASTNode) asSQL(cond *sqlbuilder.Cond, dialect string) (string, error) { + cond.Args.Flavor = dialectToFlavor(dialect) + uType := u.Type + operator := u.Op + field := u.Field + value := u.Value + + switch { + case slices.Contains(fieldOps, operator) || uType == "field": + switch value { + case nil: + return "", nil + case Null{}: + switch operator { + case "eq": + return cond.IsNull(field), nil + case "ne": + return cond.IsNotNull(field), nil + default: + return "", errors.New("null value can only be used with 'eq' or 'ne' operators") + } + default: + if fr, ok := value.(FieldRef); ok { + value = sqlbuilder.Raw(fr.Field) + } + } + switch operator { + case "eq": + return cond.Equal(field, value), nil + case "ne": + return cond.NotEqual(field, value), nil + case "gt": + return cond.GreaterThan(field, value), nil + case "lt": + return cond.LessThan(field, value), nil + case "ge", "gte": + return cond.GreaterEqualThan(field, value), nil + case "le", "lte": + return cond.LessEqualThan(field, value), nil + case "in": + if arr, ok := (value).([]any); ok { + return cond.In(field, arr...), nil + } + return "", errors.New("field operator 'in' requires collection argument") + case "startswith": + if dialect == "sqlite-internal" { + return cond.Var(sqlbuilder.Build("internal_startswith($?, $?)", sqlbuilder.Raw(field), value)), nil + } + pattern, err := prefix(value) + if err != nil { + return "", err + } + return cond.Like(field, pattern), nil + case "endswith": + if dialect == "sqlite-internal" { + return cond.Var(sqlbuilder.Build("internal_endswith($?, $?)", sqlbuilder.Raw(field), value)), nil + } + pattern, err := suffix(value) + if err != nil { + return "", err + } + return cond.Like(field, pattern), nil + case "contains": + if dialect == "sqlite-internal" { + return cond.Var(sqlbuilder.Build("internal_contains($?, $?)", sqlbuilder.Raw(field), value)), nil + } + pattern, err := infix(value) + if err != nil { + return "", err + } + return cond.Like(field, pattern), nil + + default: + return "", fmt.Errorf("unrecognized operator: %s", operator) + } + case slices.Contains(documentOps, operator) || uType == "document": + // Note: We should add unary operations under this case, like NOT. 
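+		// For illustration, a document node such as
+		//
+		//	UCASTNode{Type: "document", Op: "exists", Value: sb}
+		//
+		// (sb being a sub-select builder accepted by cond.Exists) renders as
+		// EXISTS (...) below.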
+ if value == nil { + return "", errors.New("document expression 'exists' requires a value") + } + if operator == "exists" { + return cond.Exists(value), nil + } + return "", fmt.Errorf("unrecognized operator: %s", operator) + case slices.Contains(compoundOps, operator) || uType == "compound": + switch operator { + case "and": + if value == nil { + return "", errors.New("compound expression 'and' requires a value") + } + if values, ok := (value).([]UCASTNode); ok { + conds := make([]string, 0, len(values)) + for _, c := range values { + condition, err := c.asSQL(cond, dialect) + if err != nil { + return "", err + } + conds = append(conds, condition) + } + return cond.And(conds...), nil + } + return "", errors.New("value must be an array") + case "or": + if value == nil { + return "", errors.New("compound expression 'or' requires a value") + } + if values, ok := (value).([]UCASTNode); ok { + conds := make([]string, 0, len(values)) + for _, c := range values { + condition, err := c.asSQL(cond, dialect) + if err != nil { + return "", err + } + conds = append(conds, condition) + } + return cond.Or(conds...), nil + } + return "", errors.New("value must be an array") + case "not": + if value == nil { + return "", errors.New("compound expression 'not' requires exactly one value") + } + node, ok := (value).([]UCASTNode) + if ok { + if len(node) != 1 { + return "", errors.New("compound expression 'not' requires exactly one value") + } + condition, err := node[0].asSQL(cond, dialect) + if err != nil { + return "", err + } + return cond.Not(condition), nil + } + return "", fmt.Errorf("value must be a ucast node, got %T: %[1]v", value) + } + return "", fmt.Errorf("unrecognized operator: %s", operator) + default: + return "", fmt.Errorf("unrecognized operator: %s", operator) + } +} + +func prefix(p any) (string, error) { + p0, ok := p.(string) + if !ok { + return "", fmt.Errorf("'startswith' pattern requires string argument, got %v %[1]T", p) + } + return escaped(p0) + "%", nil +} + +func suffix(p any) (string, error) { + p0, ok := p.(string) + if !ok { + return "", fmt.Errorf("'endswith' pattern requires string argument, got %v %[1]T", p) + } + return "%" + escaped(p0), nil +} + +func infix(p any) (string, error) { + p0, ok := p.(string) + if !ok { + return "", fmt.Errorf("'contains' pattern requires string argument, got %v %[1]T", p) + } + return "%" + escaped(p0) + "%", nil +} + +func escaped(p0 string) string { + p0 = strings.ReplaceAll(p0, `\`, `\\`) + p0 = strings.ReplaceAll(p0, "_", `\_`) + p0 = strings.ReplaceAll(p0, "%", `\%`) + return p0 +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go index 3465f0808..36f854c61 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go @@ -34,6 +34,7 @@ type ( RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"` Authors []*AuthorAnnotation `json:"authors,omitempty"` Schemas []*SchemaAnnotation `json:"schemas,omitempty"` + Compile *CompileAnnotation `json:"compile,omitempty"` Custom map[string]any `json:"custom,omitempty"` Location *Location `json:"location,omitempty"` @@ -48,6 +49,11 @@ type ( Definition *any `json:"definition,omitempty"` } + CompileAnnotation struct { + Unknowns []Ref `json:"unknowns,omitempty"` + MaskRule Ref `json:"mask_rule,omitempty"` // NOTE: This doesn't need to 
start with "data.package", it can be relative + } + AuthorAnnotation struct { Name string `json:"name"` Email string `json:"email,omitempty"` @@ -151,6 +157,10 @@ func (a *Annotations) Compare(other *Annotations) int { return cmp } + if cmp := a.Compile.Compare(other.Compile); cmp != 0 { + return cmp + } + if a.Entrypoint != other.Entrypoint { if a.Entrypoint { return 1 @@ -403,6 +413,8 @@ func (a *Annotations) Copy(node Node) *Annotations { cpy.Schemas[i] = a.Schemas[i].Copy() } + cpy.Compile = a.Compile.Copy() + if a.Custom != nil { cpy.Custom = deepcopy.Map(a.Custom) } @@ -716,6 +728,44 @@ func (s *SchemaAnnotation) String() string { return string(bs) } +// Copy returns a deep copy of s. +func (c *CompileAnnotation) Copy() *CompileAnnotation { + if c == nil { + return nil + } + cpy := *c + for i := range c.Unknowns { + cpy.Unknowns[i] = c.Unknowns[i].Copy() + } + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. +func (c *CompileAnnotation) Compare(other *CompileAnnotation) int { + switch { + case c == nil && other == nil: + return 0 + case c != nil && other == nil: + return 1 + case c == nil && other != nil: + return -1 + } + + if cmp := slices.CompareFunc(c.Unknowns, other.Unknowns, + func(x, y Ref) int { + return x.Compare(y) + }); cmp != 0 { + return cmp + } + return c.MaskRule.Compare(other.MaskRule) +} + +func (c *CompileAnnotation) String() string { + bs, _ := json.Marshal(c) + return string(bs) +} + func newAnnotationSet() *AnnotationSet { return &AnnotationSet{ byRule: map[*Rule][]*Annotations{}, diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/index.go b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go index bcaf4a706..845447b6d 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/index.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go @@ -213,7 +213,7 @@ func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) { return result, nil } -func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) { +func (i *baseDocEqIndex) AllRules(ValueResolver) (*IndexResult, error) { tr := newTrieTraversalResult() // Walk over the rule trie and accumulate _all_ rules @@ -285,33 +285,51 @@ func newrefindices(isVirtual func(Ref) bool) *refindices { } } +// anyValue is a fake variable we used to put "naked ref" expressions +// into the rule index +var anyValue = Var("__any__") + // Update attempts to update the refindices for the given expression in the // given rule. If the expression cannot be indexed the update does not affect // the indices. func (i *refindices) Update(rule *Rule, expr *Expr) { - if expr.Negated { - return - } - if len(expr.With) > 0 { // NOTE(tsandall): In the future, we may need to consider expressions // that have with statements applied to them. return } + if expr.Negated { + // NOTE(sr): We could try to cover simple expressions, like + // not input.funky => input.funky == false or undefined (two refindex?) + return + } + op := expr.Operator() + if op == nil { + if ts, ok := expr.Terms.(*Term); ok { + // NOTE(sr): If we wanted to cover function args, we'd need to also + // check for type "Var" here. 
But since it's impossible to call a + // function with a undefined argument, there's no point to recording + // "needs to be anything" for function args + if ref, ok := ts.Value.(Ref); ok { // "naked ref" + i.updateEq(rule, ref, anyValue) + } + } + } + a, b := expr.Operand(0), expr.Operand(1) switch { case op.Equal(equalityRef): - i.updateEq(rule, expr) + i.updateEq(rule, a.Value, b.Value) case op.Equal(equalRef) && len(expr.Operands()) == 2: // NOTE(tsandall): if equal() is called with more than two arguments the // output value is being captured in which case the indexer cannot // exclude the rule if the equal() call would return false (because the // false value must still be produced.) - i.updateEq(rule, expr) + i.updateEq(rule, a.Value, b.Value) case op.Equal(globMatchRef) && len(expr.Operands()) == 3: // NOTE(sr): Same as with equal() above -- 4 operands means the output @@ -366,8 +384,7 @@ func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper { return nil } -func (i *refindices) updateEq(rule *Rule, expr *Expr) { - a, b := expr.Operand(0), expr.Operand(1) +func (i *refindices) updateEq(rule *Rule, a, b Value) { args := rule.Head.Args if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok { i.insert(rule, idx) @@ -426,12 +443,7 @@ func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { } func (i *refindices) insert(rule *Rule, index *refindex) { - - count, ok := i.frequency.Get(index.Ref) - if !ok { - count = 0 - } - + count, _ := i.frequency.Get(index.Ref) i.frequency.Put(index.Ref, count+1) for pos, other := range i.rules[rule] { @@ -454,7 +466,7 @@ func (i *refindices) index(rule *Rule, ref Ref) *refindex { } type trieWalker interface { - Do(x any) trieWalker + Do(any) trieWalker } type trieTraversalResult struct { @@ -816,11 +828,11 @@ func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalR // for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll // bind `args[0]` and `args[1]` to this rule when called for (x=10) and // (y=12) respectively. -func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) (*refindex, bool) { - switch v := a.Value.(type) { +func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b Value) (*refindex, bool) { + switch v := a.(type) { case Var: for i, arg := range args { - if arg.Value.Compare(a.Value) == 0 { + if arg.Value.Compare(a) == 0 { if bval, ok := indexValue(b); ok { return &refindex{Ref: Ref{FunctionArgRootDocument, InternedTerm(i)}, Value: bval}, true } @@ -843,8 +855,8 @@ func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) return nil, false } -func indexValue(b *Term) (Value, bool) { - switch b := b.Value.(type) { +func indexValue(b Value) (Value, bool) { + switch b := b.(type) { case Null, Boolean, Number, String, Var: return b, true case *Array: diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go index d897952ea..1599a7355 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go @@ -1259,23 +1259,25 @@ func (p *Parser) parseLiteralExpr(negated bool) *Expr { return nil } } - // If we find a plain `every` identifier, attempt to parse an every expression, - // add hint if it succeeds. 
- if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { - var hint bool - t := p.save() - p.restore(s) - if expr := p.futureParser().parseEvery(); expr != nil { - _, hint = expr.Terms.(*Every) - } - p.restore(t) - if hint { - p.hint("`import future.keywords.every` for `every x in xs { ... }` expressions") + + if p.isFutureKeyword("every") { + // If we find a plain `every` identifier, attempt to parse an every expression, + // add hint if it succeeds. + if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { + var hint bool + t := p.save() + p.restore(s) + if expr := p.futureParser().parseEvery(); expr != nil { + _, hint = expr.Terms.(*Every) + } + p.restore(t) + if hint { + p.hint("`import future.keywords.every` for `every x in xs { ... }` expressions") + } } } - return expr } - return nil + return expr } func (p *Parser) parseWith() []*With { @@ -1368,26 +1370,28 @@ func (p *Parser) parseSome() *Expr { } p.restore(s) - s = p.save() // new copy for later - var hint bool - p.scan() - if term := p.futureParser().parseTermInfixCall(); term != nil { - if call, ok := term.Value.(Call); ok { - switch call[0].String() { - case Member.Name, MemberWithKey.Name: - hint = true + + if p.isFutureKeyword("in") { + s = p.save() // new copy for later + var hint bool + p.scan() + if term := p.futureParser().parseTermInfixCall(); term != nil { + if call, ok := term.Value.(Call); ok { + switch call[0].String() { + case Member.Name, MemberWithKey.Name: + hint = true + } } } - } - // go on as before, it's `some x[...]` or illegal - p.restore(s) - if hint { - p.hint("`import future.keywords.in` for `some x in xs` expressions") + // go on as before, it's `some x[...]` or illegal + p.restore(s) + if hint { + p.hint("`import future.keywords.in` for `some x in xs` expressions") + } } for { // collecting var args - p.scan() if p.s.tok != tokens.Ident { @@ -2566,6 +2570,7 @@ type rawAnnotation struct { RelatedResources []any `yaml:"related_resources"` Authors []any `yaml:"authors"` Schemas []map[string]any `yaml:"schemas"` + Compile map[string]any `yaml:"compile"` Custom map[string]any `yaml:"custom"` } @@ -2633,6 +2638,40 @@ func (b *metadataParser) Parse() (*Annotations, error) { result.RelatedResources = append(result.RelatedResources, rr) } + if raw.Compile != nil { + result.Compile = &CompileAnnotation{} + if unknowns, ok := raw.Compile["unknowns"]; ok { + if unknowns, ok := unknowns.([]any); ok { + result.Compile.Unknowns = make([]Ref, len(unknowns)) + for i := range unknowns { + if unknown, ok := unknowns[i].(string); ok { + ref, err := ParseRef(unknown) + if err != nil { + return nil, fmt.Errorf("invalid unknowns element %q: %w", unknown, err) + } + result.Compile.Unknowns[i] = ref + } + } + } + } + if mask, ok := raw.Compile["mask_rule"]; ok { + if mask, ok := mask.(string); ok { + maskTerm, err := ParseTerm(mask) + if err != nil { + return nil, fmt.Errorf("invalid mask_rule annotation %q: %w", mask, err) + } + switch v := maskTerm.Value.(type) { + case Var, String: + result.Compile.MaskRule = Ref{maskTerm} + case Ref: + result.Compile.MaskRule = v + default: + return nil, fmt.Errorf("invalid mask_rule annotation type %q: %[1]T", mask) + } + } + } + } + for _, pair := range raw.Schemas { k, v := unwrapPair(pair) @@ -2916,6 +2955,11 @@ func IsFutureKeywordForRegoVersion(s string, v RegoVersion) bool { return yes } +// isFutureKeyword answers if keyword is from the "future" with the parser options set. 
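+// For example, isFutureKeyword("in") is presumably true when parsing v0 Rego,
+// where `in` still needs a future.keywords import, and false under v1, where
+// `in` is an ordinary keyword.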
+func (p *Parser) isFutureKeyword(s string) bool { + return IsFutureKeywordForRegoVersion(s, p.po.RegoVersion) +} + func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) { path := imp.Path.Value.(Ref) diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go index 12e159254..d008c3d44 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go @@ -462,7 +462,7 @@ func (it *iterator) Next() (*storage.Update, error) { f := it.files[it.idx] it.idx++ - isPolicy := false + var isPolicy bool if strings.HasSuffix(f.name, RegoExt) { isPolicy = true } diff --git a/vendor/github.com/open-policy-agent/opa/v1/download/download.go b/vendor/github.com/open-policy-agent/opa/v1/download/download.go index e68aef5a8..10dc222dc 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/download/download.go +++ b/vendor/github.com/open-policy-agent/opa/v1/download/download.go @@ -8,6 +8,7 @@ package download import ( "bytes" "context" + "errors" "fmt" "io" "math/rand" @@ -49,14 +50,14 @@ type Update struct { // updates from the remote HTTP endpoint that the client is configured to // connect to. type Downloader struct { - config Config // downloader configuration for tuning polling and other downloader behaviour - client rest.Client // HTTP client to use for bundle downloading - path string // path to use in bundle download request - trigger chan chan struct{} // channel to signal downloads when manual triggering is enabled - stop chan chan struct{} // used to signal plugin to stop running - f func(context.Context, Update) // callback function invoked when download updates occur - etag string // HTTP Etag for caching purposes - sizeLimitBytes *int64 // max bundle file size in bytes (passed to reader) + config Config // downloader configuration for tuning polling and other downloader behaviour + client rest.Client // HTTP client to use for bundle downloading + path string // path to use in bundle download request + trigger chan chan struct{} // channel to signal downloads when manual triggering is enabled + stop chan chan struct{} // used to signal plugin to stop running + f func(context.Context, Update) error // callback function invoked when download updates occur + etag string // HTTP Etag for caching purposes + sizeLimitBytes *int64 // max bundle file size in bytes (passed to reader) bvc *bundle.VerificationConfig respHdrTimeoutSec int64 wg sync.WaitGroup @@ -92,7 +93,7 @@ func New(config Config, client rest.Client, path string) *Downloader { } // WithCallback registers a function f to be called when download updates occur. 
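+// As an illustrative sketch of the new error-returning signature (activate is
+// a hypothetical activation hook), callback errors now propagate to oneShot:
+//
+//	d.WithCallback(func(ctx context.Context, u download.Update) error {
+//		if u.Error != nil {
+//			return u.Error
+//		}
+//		return activate(u.Bundle)
+//	})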
-func (d *Downloader) WithCallback(f func(context.Context, Update)) *Downloader { +func (d *Downloader) WithCallback(f func(context.Context, Update) error) *Downloader { d.f = f return d } @@ -267,7 +268,7 @@ func (d *Downloader) oneShot(ctx context.Context) error { d.etag = "" if d.f != nil { - d.f(ctx, Update{ETag: "", Bundle: nil, Error: err, Metrics: m, Raw: nil}) + err = errors.Join(err, d.f(ctx, Update{ETag: "", Bundle: nil, Error: err, Metrics: m, Raw: nil})) } return err } @@ -276,7 +277,9 @@ func (d *Downloader) oneShot(ctx context.Context) error { d.longPollingEnabled = resp.longPoll if d.f != nil { - d.f(ctx, Update{ETag: resp.etag, Bundle: resp.b, Error: nil, Metrics: m, Raw: resp.raw, Size: resp.size}) + if err := d.f(ctx, Update{ETag: resp.etag, Bundle: resp.b, Error: nil, Metrics: m, Raw: resp.raw, Size: resp.size}); err != nil { + return err + } } return nil } diff --git a/vendor/github.com/open-policy-agent/opa/v1/download/oci_download.go b/vendor/github.com/open-policy-agent/opa/v1/download/oci_download.go index 301911795..977ffcdf7 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/download/oci_download.go +++ b/vendor/github.com/open-policy-agent/opa/v1/download/oci_download.go @@ -53,7 +53,7 @@ func NewOCI(config Config, client rest.Client, path, storePath string) *OCIDownl } // WithCallback registers a function f to be called when download updates occur. -func (d *OCIDownloader) WithCallback(f func(context.Context, Update)) *OCIDownloader { +func (d *OCIDownloader) WithCallback(f func(context.Context, Update) error) *OCIDownloader { d.f = f return d } @@ -207,14 +207,16 @@ func (d *OCIDownloader) oneShot(ctx context.Context) error { resp, err := d.download(ctx, m) if err != nil { if d.f != nil { - d.f(ctx, Update{ETag: "", Bundle: nil, Error: err, Metrics: m, Raw: nil}) + err = errors.Join(err, d.f(ctx, Update{ETag: "", Bundle: nil, Error: err, Metrics: m, Raw: nil})) } return err } d.SetCache(resp.etag) // set the current etag sha to the cache if d.f != nil { - d.f(ctx, Update{ETag: resp.etag, Bundle: resp.b, Error: nil, Metrics: m, Raw: resp.raw, Size: resp.size}) + if err := d.f(ctx, Update{ETag: resp.etag, Bundle: resp.b, Error: nil, Metrics: m, Raw: resp.raw, Size: resp.size}); err != nil { + return err + } } return nil } diff --git a/vendor/github.com/open-policy-agent/opa/v1/download/oci_downloader.go b/vendor/github.com/open-policy-agent/opa/v1/download/oci_downloader.go index 27939d92d..a2db09be7 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/download/oci_downloader.go +++ b/vendor/github.com/open-policy-agent/opa/v1/download/oci_downloader.go @@ -12,14 +12,14 @@ import ( ) type OCIDownloader struct { - config Config // downloader configuration for tuning polling and other downloader behaviour - client rest.Client // HTTP client to use for bundle downloading - path string // path for OCI image as //: - localStorePath string // path for the local OCI storage - trigger chan chan struct{} // channel to signal downloads when manual triggering is enabled - stop chan chan struct{} // used to signal plugin to stop running - f func(context.Context, Update) // callback function invoked when download updates occur - sizeLimitBytes *int64 // max bundle file size in bytes (passed to reader) + config Config // downloader configuration for tuning polling and other downloader behaviour + client rest.Client // HTTP client to 
use for bundle downloading + path string // path for OCI image as //: + localStorePath string // path for the local OCI storage + trigger chan chan struct{} // channel to signal downloads when manual triggering is enabled + stop chan chan struct{} // used to signal plugin to stop running + f func(context.Context, Update) error // callback function invoked when download updates occur + sizeLimitBytes *int64 // max bundle file size in bytes (passed to reader) bvc *bundle.VerificationConfig wg sync.WaitGroup logger logging.Logger diff --git a/vendor/github.com/open-policy-agent/opa/v1/download/testharness.go b/vendor/github.com/open-policy-agent/opa/v1/download/testharness.go index 654d447aa..049ef4152 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/download/testharness.go +++ b/vendor/github.com/open-policy-agent/opa/v1/download/testharness.go @@ -227,25 +227,27 @@ func (t *testFixture) setClient(client rest.Client) { t.client = client } -func (t *testFixture) oneShot(ctx context.Context, u Update) { +func (t *testFixture) oneShot(_ context.Context, u Update) error { t.updates = append(t.updates, u) if u.Error != nil { etag := t.etags["test/bundle1"] t.d.SetCache(etag) - return + return u.Error } if u.Bundle != nil { if t.mockBundleActivationError { etag := t.etags["test/bundle1"] t.d.SetCache(etag) - return + return errors.New("activation error") } } t.etags["test/bundle1"] = u.ETag + + return nil } type fileInfo struct { diff --git a/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go index 316ffe789..481f27337 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go +++ b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go @@ -19,21 +19,27 @@ import ( // Well-known metric names. 
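+// As a hedged sketch of how the new compile_* names are consumed, a caller
+// times a compile phase with the standard timer API:
+//
+//	m := metrics.New()
+//	m.Timer(CompileEvalConstraints).Start()
+//	// ... evaluate target/dialect constraints ...
+//	m.Timer(CompileEvalConstraints).Stop()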
const ( - BundleRequest = "bundle_request" - ServerHandler = "server_handler" - ServerQueryCacheHit = "server_query_cache_hit" - SDKDecisionEval = "sdk_decision_eval" - RegoQueryCompile = "rego_query_compile" - RegoQueryEval = "rego_query_eval" - RegoQueryParse = "rego_query_parse" - RegoModuleParse = "rego_module_parse" - RegoDataParse = "rego_data_parse" - RegoModuleCompile = "rego_module_compile" - RegoPartialEval = "rego_partial_eval" - RegoInputParse = "rego_input_parse" - RegoLoadFiles = "rego_load_files" - RegoLoadBundles = "rego_load_bundles" - RegoExternalResolve = "rego_external_resolve" + BundleRequest = "bundle_request" + ServerHandler = "server_handler" + ServerQueryCacheHit = "server_query_cache_hit" + SDKDecisionEval = "sdk_decision_eval" + RegoQueryCompile = "rego_query_compile" + RegoQueryEval = "rego_query_eval" + RegoQueryParse = "rego_query_parse" + RegoModuleParse = "rego_module_parse" + RegoDataParse = "rego_data_parse" + RegoModuleCompile = "rego_module_compile" + RegoPartialEval = "rego_partial_eval" + RegoInputParse = "rego_input_parse" + RegoLoadFiles = "rego_load_files" + RegoLoadBundles = "rego_load_bundles" + RegoExternalResolve = "rego_external_resolve" + CompilePrepPartial = "compile_prep_partial" + CompileEvalConstraints = "compile_eval_constraints" + CompileTranslateQueries = "compile_translate_queries" + CompileExtractAnnotationsUnknowns = "compile_extract_annotations_unknowns" + CompileExtractAnnotationsMask = "compile_extract_annotations_mask" + CompileEvalMaskRule = "compile_eval_mask_rule" ) // Info contains attributes describing the underlying metrics provider. diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/bundle/plugin.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/bundle/plugin.go index 60a951590..23685b63f 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/bundle/plugin.go +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/bundle/plugin.go @@ -441,9 +441,9 @@ func (p *Plugin) newDownloader(name string, source *Source, bundles map[string]* conf := source.Config client := p.manager.Client(source.Service) path := source.Resource - callback := func(ctx context.Context, u download.Update) { + callback := func(ctx context.Context, u download.Update) error { // wrap the callback to include the name of the bundle that was updated - p.oneShot(ctx, name, u) + return p.oneShot(ctx, name, u) } if strings.ToLower(client.Config().Type) == "oci" { ociStorePath := filepath.Join(os.TempDir(), "opa", "oci") // use temporary folder /tmp/opa/oci @@ -467,11 +467,11 @@ func (p *Plugin) newDownloader(name string, source *Source, bundles map[string]* WithBundleParserOpts(p.manager.ParserOptions()) } -func (p *Plugin) oneShot(ctx context.Context, name string, u download.Update) { +func (p *Plugin) oneShot(ctx context.Context, name string, u download.Update) error { p.mtx.Lock() defer p.mtx.Unlock() - p.process(ctx, name, u) + err := p.process(ctx, name, u) for _, listener := range p.listeners { listener(*p.status[name]) @@ -489,9 +489,11 @@ func (p *Plugin) oneShot(ctx context.Context, name string, u download.Update) { } listener(statusCpy) } + + return err } -func (p *Plugin) process(ctx context.Context, name string, u download.Update) { +func (p *Plugin) process(ctx context.Context, name string, u download.Update) error { if u.Metrics != nil { p.status[name].Metrics = u.Metrics } else { @@ -507,7 +509,7 @@ func (p *Plugin) process(ctx context.Context, name 
string, u download.Update) { etag := p.etags[name] p.downloaders[name].SetCache(etag) } - return + return u.Error } p.status[name].LastSuccessfulRequest = p.status[name].LastRequest @@ -530,7 +532,7 @@ func (p *Plugin) process(ctx context.Context, name string, u download.Update) { etag := p.etags[name] p.downloaders[name].SetCache(etag) } - return + return err } if u.Bundle.Type() == bundle.SnapshotBundleType && p.persistBundle(name, p.getBundlesCpy()) { @@ -544,7 +546,7 @@ func (p *Plugin) process(ctx context.Context, name string, u download.Update) { etag := p.etags[name] p.downloaders[name].SetCache(etag) } - return + return err } p.log(name).Debug("Bundle persisted to disk successfully at path %v.", filepath.Join(p.bundlePersistPath, name)) } @@ -562,7 +564,7 @@ func (p *Plugin) process(ctx context.Context, name string, u download.Update) { // If the plugin wasn't ready yet then check if we are now after activating this bundle. p.checkPluginReadiness() - return + return nil } if etag, ok := p.etags[name]; ok && u.ETag == etag { @@ -571,8 +573,10 @@ func (p *Plugin) process(ctx context.Context, name string, u download.Update) { // The downloader received a 304 (same etag as saved in local state), update plugin readiness p.checkPluginReadiness() - return + return nil } + + return nil } func (p *Plugin) checkPluginReadiness() { @@ -801,13 +805,13 @@ type fileLoader struct { path string bvc *bundle.VerificationConfig sizeLimitBytes int64 - f func(context.Context, string, download.Update) + f func(context.Context, string, download.Update) error bundleParserOpts ast.ParserOptions } func (fl *fileLoader) Start(ctx context.Context) { go func() { - fl.oneShot(ctx) + _ = fl.oneShot(ctx) }() } @@ -821,19 +825,17 @@ func (*fileLoader) SetCache(string) { } func (fl *fileLoader) Trigger(ctx context.Context) error { - fl.oneShot(ctx) - return nil + return fl.oneShot(ctx) } -func (fl *fileLoader) oneShot(ctx context.Context) { +func (fl *fileLoader) oneShot(ctx context.Context) (err error) { var u download.Update u.Metrics = metrics.New() info, err := os.Stat(fl.path) u.Error = err if err != nil { - fl.f(ctx, fl.name, u) - return + return fl.f(ctx, fl.name, u) } var reader *bundle.Reader @@ -841,13 +843,15 @@ func (fl *fileLoader) oneShot(ctx context.Context) { if info.IsDir() { reader = bundle.NewCustomReader(bundle.NewDirectoryLoader(fl.path)) } else { - f, err := os.Open(fl.path) + var f *os.File + f, err = os.Open(fl.path) u.Error = err if err != nil { - fl.f(ctx, fl.name, u) - return + return fl.f(ctx, fl.name, u) } - defer f.Close() + defer func(f *os.File) { + err = errors.Join(err, f.Close()) + }(f) reader = bundle.NewReader(f) } @@ -862,5 +866,5 @@ func (fl *fileLoader) oneShot(ctx context.Context) { if err == nil { u.Bundle = &b } - fl.f(ctx, fl.name, u) + return fl.f(ctx, fl.name, u) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/discovery/discovery.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/discovery/discovery.go index 36b3c06a5..7ed36acf9 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/discovery/discovery.go +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/discovery/discovery.go @@ -317,7 +317,7 @@ func saveCurrentBundleToDisk(path string, raw io.Reader) (string, error) { return bundleUtils.SaveBundleToDisk(path, raw) } -func (c *Discovery) oneShot(ctx context.Context, u download.Update) { +func (c *Discovery) oneShot(ctx context.Context, u download.Update) error { 
c.processUpdate(ctx, u) if p := status.Lookup(c.manager); p != nil { @@ -330,6 +330,8 @@ func (c *Discovery) oneShot(ctx context.Context, u download.Update) { for _, f := range c.listeners { f(*c.status) } + + return nil } func (c *Discovery) processUpdate(ctx context.Context, u download.Update) { diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/encoder.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/encoder.go index 408643cbe..740375b80 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/encoder.go +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/encoder.go @@ -135,7 +135,7 @@ func (enc *chunkEncoder) Encode(event EventV1, eventBytes []byte) ([][]byte, err } } - if int64(len(eventBytes)+enc.bytesWritten+1) <= enc.uncompressedLimit { + if int64(len(eventBytes)+enc.bytesWritten+1) < enc.uncompressedLimit { return nil, enc.appendEvent(eventBytes) } @@ -167,16 +167,12 @@ func (enc *chunkEncoder) Encode(event EventV1, eventBytes []byte) ([][]byte, err } currentSize := len(result) - if currentSize < int(enc.limit) { + if currentSize <= int(enc.limit) { // success! the incoming chunk doesn't have to lose the ND cache and can go into a chunk by itself // scale up the uncompressed limit using the uncompressed event size as a base - err = enc.appendEvent(eventBytes) - if err != nil { - return nil, err - } enc.uncompressedLimit = int64(len(eventBytes)) enc.scaleUp() - return nil, nil + return [][]byte{result}, nil } // The ND cache has to be dropped, record this size as a known maximum event size @@ -185,7 +181,6 @@ func (enc *chunkEncoder) Encode(event EventV1, eventBytes []byte) ([][]byte, err } // 2. Drop the ND cache and see if the incoming event can fit within the current chunk without the cache (so we can maximize chunk size) - enc.initialize() enc.incrMetric(encLogExUploadSizeLimitCounterName) // If there's no ND builtins cache in the event, then we don't need to retry encoding anything. 
if event.NDBuiltinCache == nil { diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/eventBuffer.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/eventBuffer.go index b7ac610b6..a0ed84fbd 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/eventBuffer.go +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/eventBuffer.go @@ -7,12 +7,14 @@ package logs import ( "context" "encoding/json" + "math" "sync" "github.com/open-policy-agent/opa/v1/logging" "github.com/open-policy-agent/opa/v1/metrics" "github.com/open-policy-agent/opa/v1/plugins/rest" "github.com/open-policy-agent/opa/v1/util" + "golang.org/x/time/rate" ) type bufferItem struct { @@ -22,32 +24,40 @@ type bufferItem struct { // eventBuffer stores and uploads a gzip compressed JSON array of EventV1 entries type eventBuffer struct { - buffer chan *bufferItem // buffer stores JSON encoded EventV1 data - upload sync.Mutex // upload controls that uploads are done sequentially - client rest.Client // client is used to upload the data to the configured service - uploadPath string // uploadPath is the configured HTTP resource path for upload - enc *chunkEncoder // encoder appends events into the gzip compressed JSON array - metrics metrics.Metrics - logger logging.Logger + buffer chan *bufferItem // buffer stores JSON encoded EventV1 data + upload sync.Mutex // upload controls that uploads are done sequentially + enc *chunkEncoder // encoder appends events into the gzip compressed JSON array + limiter *rate.Limiter + metrics metrics.Metrics + logger logging.Logger } -func newEventBuffer(bufferSizeLimitEvents int64, client rest.Client, uploadPath string, uploadSizeLimitBytes int64) *eventBuffer { +func newEventBuffer(bufferSizeLimitEvents int64, uploadSizeLimitBytes int64) *eventBuffer { return &eventBuffer{ - buffer: make(chan *bufferItem, bufferSizeLimitEvents), - client: client, - uploadPath: uploadPath, - enc: newChunkEncoder(uploadSizeLimitBytes), + buffer: make(chan *bufferItem, bufferSizeLimitEvents), + enc: newChunkEncoder(uploadSizeLimitBytes), } } -func (b *eventBuffer) WithMetrics(m metrics.Metrics) *eventBuffer { - b.metrics = m - b.enc.metrics = m +func (b *eventBuffer) WithLimiter(maxDecisionsPerSecond *float64) *eventBuffer { + if maxDecisionsPerSecond != nil { + b.limiter = rate.NewLimiter(rate.Limit(*maxDecisionsPerSecond), int(math.Max(1, *maxDecisionsPerSecond))) + } return b } +func (b *eventBuffer) WithMetrics(m metrics.Metrics) { + b.metrics = m + b.enc = b.enc.WithMetrics(m) +} + +func (*eventBuffer) Name() string { + return eventBufferType +} + func (b *eventBuffer) WithLogger(l logging.Logger) *eventBuffer { b.logger = l + b.enc = b.enc.WithLogger(l) return b } @@ -60,20 +70,25 @@ func (b *eventBuffer) incrMetric(name string) { // Reconfigure updates the user configurable values // This cannot be called concurrently, this could change the underlying channel. // Plugin manages a lock to control this so that changes to both buffer types can be managed sequentially. 
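+// As an illustrative sketch of the new signature, where maxDecisionsPerSecond
+// is a *float64 and nil disables rate limiting:
+//
+//	buf.Reconfigure(10000, 32768, maxDecisionsPerSecond)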
-func (b *eventBuffer) Reconfigure(bufferSizeLimitEvents int64, client rest.Client, uploadPath string, uploadSizeLimitBytes int64) { +func (b *eventBuffer) Reconfigure(bufferSizeLimitEvents int64, uploadSizeLimitBytes int64, maxDecisionsPerSecond *float64) { // prevent an upload from pushing events that failed to upload back into a closed buffer b.upload.Lock() defer b.upload.Unlock() - b.client = client - b.uploadPath = uploadPath + if maxDecisionsPerSecond != nil { + b.limiter = rate.NewLimiter(rate.Limit(*maxDecisionsPerSecond), int(math.Max(1, *maxDecisionsPerSecond))) + } else if b.limiter != nil { + b.limiter = nil + } + + if b.enc.limit != uploadSizeLimitBytes { + b.enc.Reconfigure(uploadSizeLimitBytes) + } if int64(cap(b.buffer)) == bufferSizeLimitEvents { return } - b.enc.Reconfigure(uploadSizeLimitBytes) - close(b.buffer) oldBuffer := b.buffer b.buffer = make(chan *bufferItem, bufferSizeLimitEvents) @@ -90,13 +105,19 @@ func (b *eventBuffer) Push(event *EventV1) { } func (b *eventBuffer) push(event *bufferItem) { + if b.limiter != nil && !b.limiter.Allow() { + b.incrMetric(logRateLimitExDropCounterName) + b.logger.Error("Decision log dropped as rate limit exceeded. Reduce reporting interval or increase rate limit.") + return + } + util.PushFIFO(b.buffer, event, b.metrics, logBufferEventDropCounterName) } // Upload reads events from the buffer and uploads them to the configured client. // All the events currently in the buffer are read and written to a gzip compressed JSON array to create a chunk of data. // Each chunk is limited by the uploadSizeLimitBytes. -func (b *eventBuffer) Upload(ctx context.Context) error { +func (b *eventBuffer) Upload(ctx context.Context, client rest.Client, uploadPath string) error { b.upload.Lock() defer b.upload.Unlock() @@ -106,21 +127,22 @@ func (b *eventBuffer) Upload(ctx context.Context) error { } for range eventLen { - event := b.readEvent() - if event == nil { + bufItem := b.readBufItem() + if bufItem == nil { break } var result [][]byte - if event.chunk != nil { - result = [][]byte{event.chunk} + if bufItem.chunk != nil { + result = [][]byte{bufItem.chunk} } else { + event := bufItem.EventV1 eventBytes, err := json.Marshal(&event) if err != nil { return err } - result, err = b.enc.Encode(*event.EventV1, eventBytes) + result, err = b.enc.Encode(*event, eventBytes) if err != nil { b.incrMetric(logEncodingFailureCounterName) if b.logger != nil { @@ -129,7 +151,7 @@ func (b *eventBuffer) Upload(ctx context.Context) error { } } - if err := b.uploadChunks(ctx, result); err != nil { + if err := b.uploadChunks(ctx, result, client, uploadPath); err != nil { return err } } @@ -144,7 +166,7 @@ func (b *eventBuffer) Upload(ctx context.Context) error { return nil } - if err := b.uploadChunks(ctx, result); err != nil { + if err := b.uploadChunks(ctx, result, client, uploadPath); err != nil { return err } @@ -153,10 +175,10 @@ func (b *eventBuffer) Upload(ctx context.Context) error { // uploadChunks attempts to upload multiple chunks to the configured client. // In case of failure all the events are added back to the buffer. 
-func (b *eventBuffer) uploadChunks(ctx context.Context, result [][]byte) error { +func (b *eventBuffer) uploadChunks(ctx context.Context, result [][]byte, client rest.Client, uploadPath string) error { var finalErr error for _, chunk := range result { - err := uploadChunk(ctx, b.client, b.uploadPath, chunk) + err := uploadChunk(ctx, client, uploadPath, chunk) // if an upload failed, requeue the chunk if err != nil { @@ -168,7 +190,7 @@ func (b *eventBuffer) uploadChunks(ctx context.Context, result [][]byte) error { } // readEvent does a nonblocking read from the event buffer -func (b *eventBuffer) readEvent() *bufferItem { +func (b *eventBuffer) readBufItem() *bufferItem { select { case event := <-b.buffer: return event diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/plugin.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/plugin.go index fd6909bc4..c88f3323a 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/plugin.go +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/plugin.go @@ -10,7 +10,6 @@ import ( "encoding/json" "errors" "fmt" - "math" "math/rand" "net/url" "reflect" @@ -19,8 +18,6 @@ import ( "sync" "time" - "golang.org/x/time/rate" - "github.com/open-policy-agent/opa/internal/ref" "github.com/open-policy-agent/opa/v1/ast" "github.com/open-policy-agent/opa/v1/logging" @@ -455,25 +452,28 @@ func (c *Config) validateAndInjectDefaults(services []string, pluginsList []stri return nil } +type buffer interface { + Name() string + Push(*EventV1) + Upload(context.Context, rest.Client, string) error + Reconfigure(int64, int64, *float64) + WithMetrics(metrics.Metrics) +} + // Plugin implements decision log buffering and uploading. type Plugin struct { - manager *plugins.Manager - config Config - runningBuffer string - reconfigMtx sync.RWMutex // reconfigMtx blocks reads/writes on buffer reconfiguration - eventBuffer *eventBuffer - buffer *logBuffer - enc *chunkEncoder - mtx sync.Mutex - statusMtx sync.Mutex - stop chan chan struct{} - reconfig chan reconfigure - preparedMask prepareOnce - preparedDrop prepareOnce - limiter *rate.Limiter - metrics metrics.Metrics - logger logging.Logger - status *lstat.Status + manager *plugins.Manager + config Config + reconfigMtx sync.RWMutex // reconfigMtx blocks reads/writes on buffer reconfiguration + b buffer + statusMtx sync.Mutex + stop chan chan struct{} + reconfig chan reconfigure + preparedMask prepareOnce + preparedDrop prepareOnce + metrics metrics.Metrics + logger logging.Logger + status *lstat.Status } type prepareOnce struct { @@ -584,12 +584,10 @@ func (b *ConfigBuilder) Parse() (*Config, error) { // New returns a new Plugin with the given config. 
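+// Both buffer implementations are expected to satisfy the buffer interface
+// above; a compile-time assertion sketch:
+//
+//	var _ buffer = (*eventBuffer)(nil)
+//	var _ buffer = (*sizeBuffer)(nil)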
func New(parsedConfig *Config, manager *plugins.Manager) *Plugin { - plugin := &Plugin{ manager: manager, config: *parsedConfig, stop: make(chan chan struct{}), - enc: newChunkEncoder(*parsedConfig.Reporting.UploadSizeLimitBytes), reconfig: make(chan reconfigure), logger: manager.Logger().WithFields(map[string]any{"plugin": Name}), status: &lstat.Status{}, @@ -597,25 +595,16 @@ func New(parsedConfig *Config, manager *plugins.Manager) *Plugin { preparedMask: *newPrepareOnce(), } - plugin.enc.WithLogger(plugin.logger) - switch parsedConfig.Reporting.BufferType { case eventBufferType: - plugin.eventBuffer = newEventBuffer( + plugin.b = newEventBuffer( *parsedConfig.Reporting.BufferSizeLimitEvents, - plugin.manager.Client(plugin.config.Service), - *parsedConfig.Resource, *parsedConfig.Reporting.UploadSizeLimitBytes, - ).WithLogger(plugin.logger).WithMetrics(plugin.metrics) - plugin.runningBuffer = eventBufferType + ).WithLogger(plugin.logger).WithLimiter(parsedConfig.Reporting.MaxDecisionsPerSecond) case sizeBufferType: - plugin.buffer = newLogBuffer(*parsedConfig.Reporting.BufferSizeLimitBytes) - plugin.runningBuffer = sizeBufferType - } - - if parsedConfig.Reporting.MaxDecisionsPerSecond != nil { - limit := *parsedConfig.Reporting.MaxDecisionsPerSecond - plugin.limiter = rate.NewLimiter(rate.Limit(limit), int(math.Max(1, limit))) + plugin.b = newSizeBuffer(*parsedConfig.Reporting.BufferSizeLimitBytes, + *parsedConfig.Reporting.UploadSizeLimitBytes, + ).WithLogger(plugin.logger).WithLimiter(parsedConfig.Reporting.MaxDecisionsPerSecond) } manager.RegisterCompilerTrigger(plugin.compilerUpdated) @@ -628,7 +617,7 @@ func New(parsedConfig *Config, manager *plugins.Manager) *Plugin { // WithMetrics sets the global metrics provider to be used by the plugin. func (p *Plugin) WithMetrics(m metrics.Metrics) *Plugin { p.metrics = m - p.enc.WithMetrics(m) + p.b.WithMetrics(m) return p } @@ -679,7 +668,7 @@ func (p *Plugin) flushDecisions(ctx context.Context) { go func(ctx context.Context, done chan bool) { for ctx.Err() == nil { - if err := p.oneShot(ctx); err != nil && !errors.Is(err, &bufferEmpty{}) { + if err := p.b.Upload(ctx, p.manager.Client(p.config.Service), *p.config.Resource); err != nil && !errors.Is(err, &bufferEmpty{}) { p.logger.Error("Error flushing decisions: %s", err) // Wait some before retrying, but skip incrementing interval since we are shutting down time.Sleep(1 * time.Second) @@ -771,7 +760,6 @@ func (p *Plugin) Log(ctx context.Context, decision *server.Info) error { } if err := p.maskEvent(ctx, decision.Txn, input, &event); err != nil { - // TODO(tsandall): see note below about error handling. 
p.logger.Error("Log event masking failed: %v.", err) return nil } @@ -783,7 +771,7 @@ func (p *Plugin) Log(ctx context.Context, decision *server.Info) error { } if p.config.Service != "" { - p.encodeAndBufferEvent(event) + p.push(event) } if p.config.Plugin != nil { @@ -902,8 +890,7 @@ func (*bufferEmpty) Error() string { } func (p *Plugin) doOneShot(ctx context.Context) error { - err := p.oneShot(ctx) - + err := p.b.Upload(ctx, p.manager.Client(p.config.Service), *p.config.Resource) if err != nil { if errors.Is(err, &bufferEmpty{}) { p.logger.Debug("Log upload queue was empty.") @@ -919,65 +906,6 @@ func (p *Plugin) doOneShot(ctx context.Context) error { return err } -func (p *Plugin) oneShot(ctx context.Context) error { - if p.runningBuffer == eventBufferType { - return p.eventBuffer.Upload(ctx) - } - - // Make a local copy of the plugin's encoder and buffer and create - // a new encoder and buffer. This is needed as locking the buffer for - // the upload duration will block policy evaluation and result in - // increased latency for OPA clients - p.mtx.Lock() - oldChunkEnc := p.enc - oldBuffer := p.buffer - p.buffer = newLogBuffer(*p.config.Reporting.BufferSizeLimitBytes) - p.enc = newChunkEncoder(*p.config.Reporting.UploadSizeLimitBytes).WithMetrics(p.metrics).WithLogger(p.logger). - WithUncompressedLimit(oldChunkEnc.uncompressedLimit, oldChunkEnc.uncompressedLimitScaleDownExponent, oldChunkEnc.uncompressedLimitScaleUpExponent) - p.mtx.Unlock() - - // Along with uploading the compressed events in the buffer - // to the remote server, flush any pending compressed data to the - // underlying writer and add to the buffer. - chunk, err := oldChunkEnc.Flush() - if err != nil { - return err - } - - for _, ch := range chunk { - p.bufferChunk(oldBuffer, ch) - } - - if oldBuffer.Len() == 0 { - return &bufferEmpty{} - } - - for bs := oldBuffer.Pop(); bs != nil; bs = oldBuffer.Pop() { - if err == nil { - err = uploadChunk(ctx, p.manager.Client(p.config.Service), *p.config.Resource, bs) - } - if err != nil { - if p.limiter != nil { - events, decErr := newChunkDecoder(bs).decode() - if decErr != nil { - continue - } - - for i := range events { - p.encodeAndBufferEvent(events[i]) - } - } else { - // requeue the chunk - p.mtx.Lock() - p.bufferChunk(p.buffer, bs) - p.mtx.Unlock() - } - } - } - - return err -} - func (p *Plugin) reconfigure(ctx context.Context, config any) { newConfig := config.(*Config) @@ -992,87 +920,52 @@ func (p *Plugin) reconfigure(ctx context.Context, config any) { p.reconfigMtx.Lock() defer p.reconfigMtx.Unlock() - switch newConfig.Reporting.BufferType { - case eventBufferType: - if p.eventBuffer == nil { - p.eventBuffer = newEventBuffer( - *p.config.Reporting.BufferSizeLimitEvents, - p.manager.Client(p.config.Service), - *p.config.Resource, - *p.config.Reporting.UploadSizeLimitBytes).WithLogger(p.logger).WithMetrics(p.metrics) - } else { - p.eventBuffer.Reconfigure( - *p.config.Reporting.BufferSizeLimitEvents, - p.manager.Client(p.config.Service), - *p.config.Resource, - *p.config.Reporting.UploadSizeLimitBytes) - } - - if p.runningBuffer == sizeBufferType { - if err := p.oneShot(ctx); err != nil && !errors.Is(err, &bufferEmpty{}) { - p.setStatus(err) - } + // User reconfigured the type of buffer + if p.b.Name() != newConfig.Reporting.BufferType { + // upload all events in the current buffer type + if err := p.b.Upload(ctx, p.manager.Client(p.config.Service), *p.config.Resource); err != nil && !errors.Is(err, &bufferEmpty{}) { + p.setStatus(err) } - p.runningBuffer = 
eventBufferType - case sizeBufferType: - if p.runningBuffer == eventBufferType { - if err := p.eventBuffer.Upload(ctx); err != nil && !errors.Is(err, &bufferEmpty{}) { - p.setStatus(err) - } + switch newConfig.Reporting.BufferType { + case eventBufferType: + p.b = newEventBuffer( + *p.config.Reporting.BufferSizeLimitEvents, + *p.config.Reporting.UploadSizeLimitBytes, + ).WithLogger(p.logger).WithLimiter(p.config.Reporting.MaxDecisionsPerSecond) + case sizeBufferType: + p.b = newSizeBuffer( + *p.config.Reporting.BufferSizeLimitBytes, + *p.config.Reporting.UploadSizeLimitBytes, + ).WithLogger(p.logger).WithLimiter(p.config.Reporting.MaxDecisionsPerSecond) } - if p.buffer == nil { - p.buffer = newLogBuffer(*p.config.Reporting.BufferSizeLimitBytes) - } + p.b.WithMetrics(p.metrics) - p.runningBuffer = sizeBufferType + return } -} -// NOTE(philipc): Because ND builtins caching can cause unbounded growth in -// decision log entry size, we do best-effort event encoding here, and when we -// run out of space, we drop the ND builtins cache, and try encoding again. -func (p *Plugin) encodeAndBufferEvent(event EventV1) { - if p.limiter != nil && !p.limiter.Allow() { - p.incrMetric(logRateLimitExDropCounterName) - p.logger.Error("Decision log dropped as rate limit exceeded. Reduce reporting interval or increase rate limit.") - return + var limit int64 + + switch p.config.Reporting.BufferType { + case eventBufferType: + limit = *p.config.Reporting.BufferSizeLimitEvents + case sizeBufferType: + limit = *p.config.Reporting.BufferSizeLimitBytes } + p.b.Reconfigure( + limit, + *p.config.Reporting.UploadSizeLimitBytes, + p.config.Reporting.MaxDecisionsPerSecond, + ) +} +func (p *Plugin) push(event EventV1) { // only blocks when the buffer is being reconfigured p.reconfigMtx.RLock() defer p.reconfigMtx.RUnlock() - if p.runningBuffer == eventBufferType { - p.eventBuffer.Push(&event) - return - } - - eventBytes, err := json.Marshal(&event) - if err != nil { - p.logger.Error("Decision log dropped due to error serializing event to JSON: %v", err) - return - } - - p.mtx.Lock() - defer p.mtx.Unlock() - result, err := p.enc.Encode(event, eventBytes) - if err != nil { - return - } - for _, chunk := range result { - p.bufferChunk(p.buffer, chunk) - } -} - -func (p *Plugin) bufferChunk(buffer *logBuffer, bs []byte) { - dropped := buffer.Push(bs) - if dropped > 0 { - p.incrMetric(logBufferEventDropCounterName) - p.incrMetric(logBufferSizeLimitExDropCounterName) - p.logger.Error("Dropped %v chunks from buffer.
Reduce reporting interval or increase buffer size.", dropped) - } + p.b.Push(&event) } func (p *Plugin) maskEvent(ctx context.Context, txn storage.Transaction, input ast.Value, event *EventV1) error { @@ -1207,12 +1100,6 @@ func (p *Plugin) logEvent(event EventV1) error { return nil } -func (p *Plugin) incrMetric(name string) { - if p.metrics != nil { - p.metrics.Counter(name).Incr() - } -} - func (p *Plugin) setStatus(err error) { p.statusMtx.Lock() p.status.SetError(err) diff --git a/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/sizeBuffer.go b/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/sizeBuffer.go new file mode 100644 index 000000000..6be6e379f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/plugins/logs/sizeBuffer.go @@ -0,0 +1,164 @@ +package logs + +import ( + "context" + "encoding/json" + "math" + "sync" + + "github.com/open-policy-agent/opa/v1/logging" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/plugins/rest" + "golang.org/x/time/rate" +) + +type sizeBuffer struct { + mtx sync.Mutex + buffer *logBuffer + BufferSizeLimitBytes int64 + UploadSizeLimitBytes int64 + enc *chunkEncoder // encoder appends events into the gzip compressed JSON array + limiter *rate.Limiter + metrics metrics.Metrics + logger logging.Logger +} + +func newSizeBuffer(bufferSizeLimitBytes int64, uploadSizeLimitBytes int64) *sizeBuffer { + return &sizeBuffer{ + enc: newChunkEncoder(uploadSizeLimitBytes), + buffer: newLogBuffer(bufferSizeLimitBytes), + BufferSizeLimitBytes: uploadSizeLimitBytes, + UploadSizeLimitBytes: uploadSizeLimitBytes, + } +} + +func (b *sizeBuffer) WithLimiter(maxDecisionsPerSecond *float64) *sizeBuffer { + if maxDecisionsPerSecond != nil { + b.limiter = rate.NewLimiter(rate.Limit(*maxDecisionsPerSecond), int(math.Max(1, *maxDecisionsPerSecond))) + } + return b +} + +func (b *sizeBuffer) WithMetrics(m metrics.Metrics) { + b.metrics = m + b.enc.metrics = m +} + +func (b *sizeBuffer) WithLogger(l logging.Logger) *sizeBuffer { + b.logger = l + b.enc.logger = l + return b +} + +func (*sizeBuffer) Name() string { + return sizeBufferType +} + +func (b *sizeBuffer) incrMetric(name string) { + if b.metrics != nil { + b.metrics.Counter(name).Incr() + } +} + +func (b *sizeBuffer) Reconfigure(bufferSizeLimitBytes int64, uploadSizeLimitBytes int64, maxDecisionsPerSecond *float64) { + b.mtx.Lock() + defer b.mtx.Unlock() + + if maxDecisionsPerSecond != nil { + b.limiter = rate.NewLimiter(rate.Limit(*maxDecisionsPerSecond), int(math.Max(1, *maxDecisionsPerSecond))) + } else if b.limiter != nil { + b.limiter = nil + } + + // The encoder and buffer will use these new values after upload + b.UploadSizeLimitBytes = uploadSizeLimitBytes + b.BufferSizeLimitBytes = bufferSizeLimitBytes +} + +func (b *sizeBuffer) Push(event *EventV1) { + if b.limiter != nil && !b.limiter.Allow() { + b.incrMetric(logRateLimitExDropCounterName) + b.logger.Error("Decision log dropped as rate limit exceeded. 
Reduce reporting interval or increase rate limit.") + return + } + + eventBytes, err := json.Marshal(&event) + if err != nil { + b.logger.Error("Decision log dropped due to error serializing event to JSON: %v", err) + return + } + + b.mtx.Lock() + defer b.mtx.Unlock() + result, err := b.enc.Encode(*event, eventBytes) + if err != nil { + return + } + for _, chunk := range result { + b.bufferChunk(b.buffer, chunk) + } +} + +func (b *sizeBuffer) Upload(ctx context.Context, client rest.Client, uploadPath string) error { + // Make a local copy of the plugin's encoder and buffer and create + // a new encoder and buffer. This is needed as locking the buffer for + // the upload duration will block policy evaluation and result in + // increased latency for OPA clients + b.mtx.Lock() + oldChunkEnc := b.enc + oldBuffer := b.buffer + b.buffer = newLogBuffer(b.BufferSizeLimitBytes) + b.enc = newChunkEncoder(b.UploadSizeLimitBytes).WithMetrics(b.metrics).WithLogger(b.logger). + WithUncompressedLimit(oldChunkEnc.uncompressedLimit, oldChunkEnc.uncompressedLimitScaleDownExponent, oldChunkEnc.uncompressedLimitScaleUpExponent) + b.mtx.Unlock() + + // Along with uploading the compressed events in the buffer + // to the remote server, flush any pending compressed data to the + // underlying writer and add to the buffer. + chunk, err := oldChunkEnc.Flush() + if err != nil { + return err + } + + for _, ch := range chunk { + b.bufferChunk(oldBuffer, ch) + } + + if oldBuffer.Len() == 0 { + return &bufferEmpty{} + } + + for bs := oldBuffer.Pop(); bs != nil; bs = oldBuffer.Pop() { + if err == nil { + err = uploadChunk(ctx, client, uploadPath, bs) + } + if err != nil { + if b.limiter != nil { + events, decErr := newChunkDecoder(bs).decode() + if decErr != nil { + continue + } + + for i := range events { + b.Push(&events[i]) + } + } else { + // requeue the chunk + b.mtx.Lock() + b.bufferChunk(b.buffer, bs) + b.mtx.Unlock() + } + } + } + + return err +} + +func (b *sizeBuffer) bufferChunk(buffer *logBuffer, bs []byte) { + dropped := buffer.Push(bs) + if dropped > 0 { + b.incrMetric(logBufferEventDropCounterName) + b.incrMetric(logBufferSizeLimitExDropCounterName) + b.logger.Error("Dropped %v chunks from buffer. Reduce reporting interval or increase buffer size.", dropped) + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/compile/compile.go b/vendor/github.com/open-policy-agent/opa/v1/rego/compile/compile.go new file mode 100644 index 000000000..7fd217757 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/compile/compile.go @@ -0,0 +1,360 @@ +// Package compile collects a specialized interface to package rego, built for +// compiling policies into filters. It's a combination of simple evals (of any +// masking rule), and partial eval, equipped with the correct settings for some +// options; and paired with post-checks that determine if the result of partial +// evaluation can be translated into filter queries for certain targets/dialects. +// On success, the PE results are translated into queries, i.e. SQL WHERE clauses +// or UCAST expressions. 
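Taken together with the definitions that follow, the new `v1/rego/compile` package supports a small builder-style workflow: construct with options, `Prepare` once, then `Compile` per input. A hypothetical end-to-end use, assuming a made-up policy and unknowns (everything except the option and method names is invented):

```go
// Hypothetical usage sketch of the v1/rego/compile API defined below.
package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/rego"
	"github.com/open-policy-agent/opa/v1/rego/compile"
)

func main() {
	ctx := context.Background()

	// Made-up filter policy: include rows whose colour is green.
	module := `package filters

include if input.fruit.colour == "green"`

	c := compile.New(
		compile.Target("sql", "postgresql"), // one target/dialect pair
		compile.ParsedQuery(ast.MustParseBody("data.filters.include")),
		compile.ParsedUnknowns(ast.MustParseTerm("input.fruit")),
		compile.Rego(rego.Module("filters.rego", module)),
	)

	// Prepare does everything that doesn't depend on the (known) input.
	prepared, err := c.Prepare(ctx)
	if err != nil {
		panic(err)
	}

	filters, err := prepared.Compile(ctx)
	if err != nil {
		panic(err)
	}

	// With a single target/dialect, One() is safe to use.
	fmt.Println(filters.One().Query) // e.g. a SQL WHERE clause fragment
}
```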
+package compile + +import ( + "context" + "fmt" + + "github.com/open-policy-agent/opa/internal/compile" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/rego" + + // "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/util" +) + +type CompileOption func(*Compile) + +type Compile struct { + targets []string + dialects []string + maskRule ast.Ref + unknowns []*ast.Term // ast.Ref would be slightly more on-the-spot, but we follow what is done in v1/rego to minimise surprises. + query ast.Body + mappings map[string]any + metrics metrics.Metrics + + regoOpts []func(*rego.Rego) +} + +// Rego allows passing through common `*rego.Rego` options +func Rego(o ...func(*rego.Rego)) CompileOption { + return func(c *Compile) { + c.regoOpts = append(c.regoOpts, o...) + } +} + +// Target lets you control the targets of a filter compilation. If repeated, +// it'll apply constraints for all the targets simultaneously (i.e. the +// union of their constraints = the intersection of supported features). +func Target(target, dialect string) CompileOption { + return func(c *Compile) { + c.targets = append(c.targets, target) + c.dialects = append(c.dialects, dialect) + } +} + +// MaskRule determines which rule of the provided modules is to be evaluated +// to determine the masking of columns. Applying those masking rules is an out- +// of-band concern, when processing the results of the query. +func MaskRule(rule ast.Ref) CompileOption { + return func(c *Compile) { + c.maskRule = rule + } +} + +// Mappings allows controlling the table and column names of the generated +// queries, if they don't match what's in the policy's unknowns. +// These can be simple maps, like +// +// { +// fruit: { +// $self: "fruit_table", +// name: "name_col", +// } +// } +// +// or per-target/per-dialect, +// +// { +// sql: { // per-target +// fruit: { +// $self: "fruit_table", +// name: "name_col", +// } +// } +// } +// +// { +// postgresql: { // per-dialect +// fruit: { +// $self: "fruit_table", +// name: "name_col", +// } +// } +// } +func Mappings(m map[string]any) CompileOption { + return func(c *Compile) { + c.mappings = m + } +} + +// Metrics allows passing the `metrics.Metrics` to use for recording timers. +// It's passed along to the underlying `rego.Rego` evals, too. +func Metrics(m metrics.Metrics) CompileOption { + return func(c *Compile) { + c.metrics = m + } +} + +// ParsedUnknowns lets you pass in the unknowns of this filter compilation. +func ParsedUnknowns(s ...*ast.Term) CompileOption { + return func(c *Compile) { + c.unknowns = s + } +} + +// ParsedQuery lets you pass in the main entrypoint of this filter compilation. +func ParsedQuery(q ast.Body) CompileOption { + return func(c *Compile) { + c.query = q + } +} + +// New creates a new `*compile.Compile` struct. +func New(opts ...CompileOption) *Compile { + c := &Compile{} + for i := range opts { + opts[i](c) + } + c.regoOpts = append(c.regoOpts, + rego.Metrics(c.metrics), + // We require evaluating non-det builtins for the translated targets: + // We're not able to meaningfully translate things like http.send, sql.send, or + io.jwt.decode_verify into SQL or UCAST, so we try to eval them out where possible.
+ rego.NondeterministicBuiltins(true), + ) + return c +} + +// Prepared represents a ready-for-eval intermediate state where everything not depending +// on input has already been done. +type Prepared struct { + compile *Compile // carry along + + constraintSet *compile.ConstraintSet + shorts compile.Set[string] + regoPrepareOptions []rego.PrepareOption + preparedMaskQuery *rego.PreparedEvalQuery + preparedPartialQuery *rego.PreparedPartialQuery +} + +type PrepareOption func(*Prepared) + +// RegoPrepareOptions lets you pass through any `rego.PrepareOption`. +func RegoPrepareOptions(o ...rego.PrepareOption) PrepareOption { + return func(p *Prepared) { + p.regoPrepareOptions = append(p.regoPrepareOptions, o...) + } +} + +// noop allows us to call c.timer() for metrics even if we have no metrics +type noop struct{} + +var n = noop{} + +type minimetrics interface { + Start() + Stop() int64 +} + +func (noop) Start() {} +func (noop) Stop() int64 { return 0 } + +func (c *Compile) timer(t string) minimetrics { + if c.metrics != nil { + return c.metrics.Timer(t) + } + return n +} + +// Prepare evaluates as much as possible without knowing the (known) input yet +func (c *Compile) Prepare(ctx context.Context, po ...PrepareOption) (*Prepared, error) { + p := &Prepared{ + compile: c, + } + for i := range po { + po[i](p) + } + + // constraints + c.timer(metrics.CompileEvalConstraints).Start() + defer c.timer(metrics.CompileEvalConstraints).Stop() + constrs := make([]*compile.Constraint, len(p.compile.targets)) + for i := range p.compile.targets { + var err error + constrs[i], err = compile.NewConstraints(p.compile.targets[i], p.compile.dialects[i]) // NewConstraints validates the tuples + if err != nil { + return nil, err + } + } + p.constraintSet = compile.NewConstraintSet(constrs...) + p.shorts = compile.ShortsFromMappings(p.compile.mappings) + + // mask prep + if c.maskRule != nil { + rp, err := rego.New(append(c.regoOpts, + rego.ParsedQuery(ast.NewBody(ast.NewExpr(ast.NewTerm(c.maskRule)))), + )...).PrepareForEval(ctx, p.regoPrepareOptions...) + if err != nil { + return nil, fmt.Errorf("prepare for mask eval: %w", err) + } + p.preparedMaskQuery = &rp + } + + // PE prep + rp, err := rego.New(append(c.regoOpts, + rego.ParsedQuery(c.query), + )...).PrepareForPartial(ctx, p.regoPrepareOptions...) + if err != nil { + return nil, fmt.Errorf("prepare for partial: %w", err) + } + + p.preparedPartialQuery = &rp + return p, nil +} + +// Filter represents the result of a policy-to-filter compilation for one +// specific target/dialect +type Filter struct { + Query any + Masks map[string]any +} + +// Filters represents all the filters compiled from a policy. Can contain +// `compile.Filter` for various target/dialect combinations. +type Filters struct { + filters map[string]map[string]Filter +} + +func (f *Filters) push(target, dialect string, query any, masks map[string]any) { + if f.filters == nil { + f.filters = make(map[string]map[string]Filter) + } + if f.filters[target] == nil { + f.filters[target] = make(map[string]Filter) + } + f.filters[target][dialect] = Filter{Query: query, Masks: masks} +} + +// For is a helper method to retrieve the `compile.Filter` for a specific +// target/dialect. +func (f *Filters) For(target, dialect string) Filter { + return f.filters[target][dialect] +} + +// One is a helper that extracts a single `compile.Filter`. Only safe to use +// when filters have been compiled for a single target/dialect, otherwise it'll +// return a random result of the compile filters. 
Panics when there's no filter +// at all. +func (f *Filters) One() Filter { + for i := range f.filters { + for j := range f.filters[i] { + return f.filters[i][j] + } + } + panic("empty filters") +} + +// Compile does all the steps needed to generate `*compile.Filters` from a +// prepared state (`*compile.Prepared`). +func (p *Prepared) Compile(ctx context.Context, eo ...rego.EvalOption) (*Filters, error) { + // mask eval (non-partial) + var maskResult map[string]any + maskOpts := append(eo, + rego.EvalMetrics(p.compile.metrics), + rego.EvalRuleIndexing(false), + rego.EvalNondeterministicBuiltins(true), + ) + if p.preparedMaskQuery != nil { + p.compile.timer(metrics.CompileEvalMaskRule).Start() + rs, err := p.preparedMaskQuery.Eval(ctx, maskOpts...) + if err != nil { + return nil, fmt.Errorf("evaluate masks: %w", err) + } + if len(rs) != 0 { + maskResultValue, err := ast.InterfaceToValue(rs[0].Expressions[0].Value) + if err != nil { + return nil, fmt.Errorf("convert masks: %w", err) + } + if err := util.Unmarshal([]byte(maskResultValue.String()), &maskResult); err != nil { + return nil, fmt.Errorf("convert masks: %w", err) + } + } + p.compile.timer(metrics.CompileEvalMaskRule).Stop() + } + + // PE for conversion + opts := append(maskOpts, + rego.EvalParsedUnknowns(p.compile.unknowns), + ) + pq, err := p.preparedPartialQuery.Partial(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("partial eval: %w", err) + } + + if errs := compile.Check(pq, p.constraintSet, p.shorts).ASTErrors(); errs != nil { + return nil, ast.Errors(errs) + } + + p.compile.timer(metrics.CompileTranslateQueries).Start() + defer p.compile.timer(metrics.CompileTranslateQueries).Stop() + + ret := Filters{} + for i := range p.compile.targets { + target, dialect := p.compile.targets[i], p.compile.dialects[i] + if pq.Queries == nil { // unconditional NO + ret.push(target, dialect, nil, nil) + continue + } + mappings, err := lookupMappings(p.compile.mappings, target, dialect) + if err != nil { + return nil, fmt.Errorf("mappings: %w", err) + } + switch target { + case "ucast": + query := compile.QueriesToUCAST(pq.Queries, mappings) + ret.push(target, dialect, query.Map(), maskResult) + case "sql": + sql, err := compile.QueriesToSQL(pq.Queries, mappings, dialect) + if err != nil { + return nil, fmt.Errorf("convert to queries: %w", err) + } + ret.push(target, dialect, sql, maskResult) + } + } + + return &ret, nil +} + +func lookupMappings(mappings map[string]any, target, dialect string) (map[string]any, error) { + if mappings == nil { + return nil, nil + } + + if md := mappings[dialect]; md != nil { + m, ok := md.(map[string]any) + if !ok { + return nil, fmt.Errorf("invalid mappings for dialect %s", dialect) + } + if m != nil { + return m, nil + } + } + + if mt := mappings[target]; mt != nil { + n, ok := mt.(map[string]any) + if !ok { + return nil, fmt.Errorf("invalid mappings for target %s", target) + } + return n, nil + } + return mappings, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/repl/repl.go b/vendor/github.com/open-policy-agent/opa/v1/repl/repl.go index 612cfa8b1..4be7681eb 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/repl/repl.go +++ b/vendor/github.com/open-policy-agent/opa/v1/repl/repl.go @@ -164,7 +164,7 @@ func (r *REPL) initModule(ctx context.Context) error { func (r *REPL) WithStderrWriter(w io.Writer) *REPL { r.stderr = w - return nil + return r } // Loop will run until the user enters "exit", Ctrl+C, Ctrl+D, or
an unexpected error occurs. @@ -1092,7 +1092,7 @@ func (r *REPL) evalBody(ctx context.Context, compiler *ast.Compiler, input ast.V case "json": return pr.JSON(r.output, output) default: - return pr.Pretty(r.output, output) + return pr.Pretty(r.output, r.stderr, output) } } @@ -1145,7 +1145,7 @@ func (r *REPL) evalPartial(ctx context.Context, compiler *ast.Compiler, input as case "json": return pr.JSON(r.output, output) default: - return pr.Pretty(r.output, output) + return pr.Pretty(r.output, r.stderr, output) } } diff --git a/vendor/github.com/open-policy-agent/opa/v1/runtime/runtime.go b/vendor/github.com/open-policy-agent/opa/v1/runtime/runtime.go index 8e8552c03..53d39cae1 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/runtime/runtime.go +++ b/vendor/github.com/open-policy-agent/opa/v1/runtime/runtime.go @@ -806,7 +806,8 @@ func (rt *Runtime) StartREPL(ctx context.Context) error { repl := repl.New(rt.Store, rt.Params.HistoryPath, rt.Params.Output, rt.Params.OutputFormat, rt.Params.ErrorLimit, banner). WithRuntime(rt.Manager.Info). WithRegoVersion(rt.Params.regoVersion()). - WithInitBundles(rt.loadedPathsResult.Bundles) + WithInitBundles(rt.loadedPathsResult.Bundles). + WithStderrWriter(rt.Params.Output) if rt.Params.Watch { if err := rt.startWatcher(ctx, rt.Params.Paths, onReloadPrinter(rt.Params.Output)); err != nil { diff --git a/vendor/github.com/open-policy-agent/opa/v1/server/compile_handler.go b/vendor/github.com/open-policy-agent/opa/v1/server/compile_handler.go new file mode 100644 index 000000000..35bd72c3d --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/server/compile_handler.go @@ -0,0 +1,608 @@ +// Copyright 2025 The OPA Authors +// SPDX-License-Identifier: Apache-2.0 + +package server + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "slices" + "strings" + + "github.com/open-policy-agent/opa/internal/compile" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/rego" + rego_compile "github.com/open-policy-agent/opa/v1/rego/compile" + "github.com/open-policy-agent/opa/v1/server/failtracer" + "github.com/open-policy-agent/opa/v1/server/types" + "github.com/open-policy-agent/opa/v1/server/writer" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/topdown" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + // decisionLogType is injected under custom.type in the DL entry + decisionLogType = "open-policy-agent/compile" + + invalidUnknownCode = "invalid_unknown" + + // Timer names + timerPrepPartial = metrics.CompilePrepPartial + timerExtractAnnotationsUnknowns = metrics.CompileExtractAnnotationsUnknowns + timerExtractAnnotationsMask = metrics.CompileExtractAnnotationsMask + + unknownsCacheSize = 500 + maskingRuleCacheSize = 500 + + // These need to be kept up to date with `CompileApiKnownHeaders()` below + multiTargetJSON = "application/vnd.opa.multitarget+json" + ucastAllJSON = "application/vnd.opa.ucast.all+json" + ucastMinimalJSON = "application/vnd.opa.ucast.minimal+json" + ucastPrismaJSON = 
"application/vnd.opa.ucast.prisma+json" + ucastLINQJSON = "application/vnd.opa.ucast.linq+json" + sqlPostgresJSON = "application/vnd.opa.sql.postgresql+json" + sqlMySQLJSON = "application/vnd.opa.sql.mysql+json" + sqlSQLServerJSON = "application/vnd.opa.sql.sqlserver+json" + sqliteJSON = "application/vnd.opa.sql.sqlite+json" + + // back-compat + applicationJSON = "application/json" +) + +func CompileAPIKnownHeaders() []string { + return []string{ + multiTargetJSON, + ucastAllJSON, + ucastMinimalJSON, + ucastPrismaJSON, + ucastLINQJSON, + sqlPostgresJSON, + sqlMySQLJSON, + sqlSQLServerJSON, + sqliteJSON, + } +} + +var allKnownHeaders = append(CompileAPIKnownHeaders(), applicationJSON) + +type CompileResult struct { + Query any `json:"query"` + Masks map[string]any `json:"masks,omitempty"` +} + +// Incoming JSON request body structure. +type CompileFiltersRequestV1 struct { + Input *any `json:"input"` + Query string `json:"query"` + Unknowns *[]string `json:"unknowns"` + Options struct { + DisableInlining []string `json:"disableInlining,omitempty"` + Mappings map[string]any `json:"targetSQLTableMappings,omitempty"` + TargetDialects []string `json:"targetDialects,omitempty"` + MaskRule string `json:"maskRule,omitempty"` + } `json:"options"` +} + +type compileFiltersRequest struct { + Query ast.Body + Input ast.Value + Unknowns []ast.Ref + Options compileFiltersRequestOptions +} + +type compileFiltersRequestOptions struct { + MaskRule ast.Ref `json:"maskRule,omitempty"` + MaskInput ast.Value `json:"maskInput,omitempty"` +} + +type CompileResponseV1 struct { + Result *any `json:"result,omitempty"` + Explanation types.TraceV1 `json:"explanation,omitempty"` + Metrics types.MetricsV1 `json:"metrics,omitempty"` + Hints []failtracer.Hint `json:"hints,omitempty"` +} + +func (s *Server) v1CompileFilters(w http.ResponseWriter, r *http.Request) { + urlPath := r.PathValue("path") + if urlPath == "" { + writer.Error(w, http.StatusBadRequest, types.NewErrorV1(types.CodeInvalidParameter, "missing required 'path' parameter")) + return + } + + ctx := r.Context() + explainMode := getExplain(r.URL, types.ExplainOffV1) + includeInstrumentation := getBoolParam(r.URL, types.ParamInstrumentV1, true) + + m := metrics.New() + m.Timer(metrics.ServerHandler).Start() + m.Timer(metrics.RegoQueryParse).Start() + // decompress the input if sent as zip + body, err := util.ReadMaybeCompressedBody(r) + if err != nil { + writer.Error(w, http.StatusBadRequest, types.NewErrorV1(types.CodeInvalidParameter, "could not decompress the body: %v", err)) + return + } + + comp := s.getCompiler() // used for fuzzy rule name hints, and rego evals further down + + // NOTE(sr): We keep some fields twice: from the unparsed and from the transformed + // request (`orig` and `request` respectively), because otherwise, we'd need to re- + // transform the values back for including them in the decision logs. 
+ orig, request, reqErr := readInputCompileFiltersV1(comp, body, urlPath, s.manager.ParserOptions()) + if reqErr != nil { + writer.Error(w, http.StatusBadRequest, reqErr) + return + } + m.Timer(metrics.RegoQueryParse).Stop() + + c := storage.NewContext().WithMetrics(m) + txn, err := s.store.NewTransaction(ctx, storage.TransactionParams{Context: c}) + if err != nil { + writer.ErrorAuto(w, err) + return + } + + defer s.store.Abort(ctx, txn) + var buf *topdown.BufferTracer + if explainMode != types.ExplainOffV1 { + buf = topdown.NewBufferTracer() + } + + unknowns := request.Unknowns // always wins over annotations if provided + maskingRule := request.Options.MaskRule // always wins over annotations if provided + + // check annotations if one of these is missing, needs own compiler + // NB(sr): we throw this compiler away at the end -- what it produced for us is going to be cached + var annotationsCompiler *ast.Compiler + if len(unknowns) == 0 || maskingRule == nil { + var errs ast.Errors + annotationsCompiler, errs = prepareAnnotations(ctx, comp, s.store, txn, s.manager.ParserOptions()) + if len(errs) > 0 { + writer.Error(w, http.StatusBadRequest, + types.NewErrorV1(types.CodeEvaluation, types.MsgEvaluationError). + WithASTErrors(errs)) + return + } + } + + if len(unknowns) == 0 { // check cache for unknowns + var errs []*ast.Error + unknowns, errs = s.compileFiltersUnknowns(m, annotationsCompiler, urlPath, request.Query) + if errs != nil { + writer.Error(w, http.StatusBadRequest, + types.NewErrorV1(types.CodeEvaluation, types.MsgEvaluationError). + WithASTErrors(errs)) + return + } + } + + if maskingRule == nil { + var errs []*ast.Error + maskingRule, errs = s.compileFiltersMaskRule(m, annotationsCompiler, urlPath, request.Query) + if errs != nil { + writer.Error(w, http.StatusBadRequest, + types.NewErrorV1(types.CodeEvaluation, types.MsgEvaluationError). + WithASTErrors(errs)) + return + } + } + + var ndbCache builtins.NDBCache + if s.ndbCacheEnabled { + ndbCache = builtins.NDBCache{} + } + + contentType, err := sanitizeHeader(r.Header.Get("Accept")) + if err != nil { + writer.Error(w, http.StatusBadRequest, types.NewErrorV1(types.CodeInvalidParameter, "Accept header: %s", err.Error())) + return + } + + target, dialect := targetDialect(contentType) + + targetOption := []rego_compile.CompileOption{} + multi := make([][2]string, len(orig.Options.TargetDialects)) + + switch target { + case "multi": + for i, targetTuple := range orig.Options.TargetDialects { + s := strings.Split(targetTuple, "+") + target, dialect := s[0], s[1] + multi[i] = [2]string{target, dialect} + targetOption = append(targetOption, rego_compile.Target(target, dialect)) + } + default: + targetOption = append(targetOption, rego_compile.Target(target, dialect)) + } + + unks := make([]*ast.Term, len(unknowns)) + for i := range unknowns { + unks[i] = ast.NewTerm(unknowns[i]) + } + m.Timer(timerPrepPartial).Start() + // NB(sr): just cache preparedCompile by path? 
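The lookups below memoize their annotation-derived results in the LRU caches added to `Server` (purged on policy changes; see server.go further down). A standalone sketch of that memoization pattern with `hashicorp/golang-lru/v2`, using made-up values:

```go
// Standalone sketch of the per-path memoization used for annotation-derived
// unknowns: a bounded LRU keyed by URL path, purged when policies change.
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	cache, err := lru.New[string, []string](500) // size mirrors unknownsCacheSize
	if err != nil {
		panic(err)
	}

	resolve := func(path string) []string {
		if v, ok := cache.Get(path); ok {
			return v // cache hit: skip the annotation walk
		}
		v := []string{"input.fruit"} // stand-in for the expensive extraction
		cache.Add(path, v)
		return v
	}

	fmt.Println(resolve("filters/include")) // miss: computes and stores
	fmt.Println(resolve("filters/include")) // hit: returns cached value

	cache.Purge() // what the server does when a storage.TriggerEvent reports policy changes
}
```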
+ preparedCompile, err := rego_compile.New( + append(targetOption, + rego_compile.ParsedUnknowns(unks...), + rego_compile.ParsedQuery(request.Query), + rego_compile.Metrics(m), + rego_compile.Mappings(orig.Options.Mappings), + rego_compile.MaskRule(maskingRule), + rego_compile.Rego( + rego.Compiler(comp), + rego.Store(s.store), + rego.Transaction(txn), + rego.DisableInlining(orig.Options.DisableInlining), + rego.QueryTracer(buf), + rego.Instrument(includeInstrumentation), + rego.NDBuiltinCache(ndbCache), + rego.Runtime(s.runtime), + rego.UnsafeBuiltins(unsafeBuiltinsMap), + rego.InterQueryBuiltinCache(s.interQueryBuiltinCache), + rego.InterQueryBuiltinValueCache(s.interQueryBuiltinValueCache), + rego.PrintHook(s.manager.PrintHook()), + rego.DistributedTracingOpts(s.distributedTracingOpts), + ), + )..., + ).Prepare(ctx) + if err != nil { + switch err := err.(type) { + case ast.Errors: + writer.Error(w, http.StatusBadRequest, types.NewErrorV1(types.CodeInvalidParameter, types.MsgCompileModuleError).WithASTErrors(err)) + default: + writer.ErrorAuto(w, err) + } + return + + } + m.Timer(timerPrepPartial).Stop() + + qt := failtracer.New() + + filters, err := preparedCompile.Compile(ctx, + rego.EvalTransaction(txn), + rego.EvalParsedInput(request.Input), + rego.EvalPrintHook(s.manager.PrintHook()), + rego.EvalNDBuiltinCache(ndbCache), + rego.EvalInterQueryBuiltinCache(s.interQueryBuiltinCache), + rego.EvalInterQueryBuiltinValueCache(s.interQueryBuiltinValueCache), + rego.EvalQueryTracer(qt), + ) + if err != nil { + switch err := err.(type) { + case ast.Errors: + writer.Error(w, http.StatusBadRequest, types.NewErrorV1(types.CodeEvaluation, types.MsgEvaluationError).WithASTErrors(err)) + default: + writer.ErrorAuto(w, err) + } + return + } + + result := CompileResponseV1{ + Hints: qt.Hints(unknowns), + } + + switch target { + case "multi": + targets := struct { + UCAST *CompileResult `json:"ucast,omitempty"` + Postgres *CompileResult `json:"postgresql,omitempty"` + MySQL *CompileResult `json:"mysql,omitempty"` + MSSQL *CompileResult `json:"sqlserver,omitempty"` + SQLite *CompileResult `json:"sqlite,omitempty"` + }{} + + for _, targetTuple := range multi { + target, dialect := targetTuple[0], targetTuple[1] + f := filters.For(target, dialect) + var cr *CompileResult + if f.Query != nil { + cr = &CompileResult{Query: f.Query, Masks: f.Masks} + } + switch target { + case "ucast": + if targets.UCAST != nil { + continue // there's only one UCAST representation, don't translate that twice + } + targets.UCAST = cr + case "sql": + switch dialect { + case "postgresql": + targets.Postgres = cr + case "mysql": + targets.MySQL = cr + case "sqlserver": + targets.MSSQL = cr + case "sqlite": + targets.SQLite = cr + } + } + } + + t0 := any(targets) + result.Result = &t0 + + default: + f := filters.For(target, dialect) + if f.Query != nil { + s0 := any(&CompileResult{Query: f.Query, Masks: f.Masks}) + result.Result = &s0 + } + } + + m.Timer(metrics.ServerHandler).Stop() + fin(w, result, contentType, m, includeMetrics(r), includeInstrumentation, pretty(r)) + + unk := make([]string, len(unknowns)) + for i := range unknowns { + unk[i] = unknowns[i].String() + } + + br, _ := getRevisions(ctx, s.store, txn) + ctx, logger := s.getDecisionLogger(ctx, br) + custom := map[string]any{ + "options": orig.Options, + "unknowns": unk, + "type": decisionLogType, + "mask_rule": maskingRule.String(), + } + + if err := logger.Log(ctx, txn, urlPath, orig.Query, orig.Input, request.Input, result.Result, ndbCache, nil, m, custom); err 
!= nil { + writer.ErrorAuto(w, err) + return + } +} + +func (s *Server) compileFiltersUnknowns(m metrics.Metrics, comp *ast.Compiler, path string, query ast.Body) ([]ast.Ref, []*ast.Error) { + key := path + unknowns, ok := s.compileUnknownsCache.Get(key) + if ok { + return unknowns, nil + } + m.Timer(timerExtractAnnotationsUnknowns).Start() + if len(query) != 1 { + return nil, nil + } + q, ok := query[0].Terms.(*ast.Term) + if !ok { + return nil, nil + } + queryRef, ok := q.Value.(ast.Ref) + if !ok { + return nil, nil + } + unknowns, errs := compile.ExtractUnknownsFromAnnotations(comp, queryRef) + if errs != nil { + return nil, errs + } + m.Timer(timerExtractAnnotationsUnknowns).Stop() + + s.compileUnknownsCache.Add(key, unknowns) + return unknowns, nil +} + +func (s *Server) compileFiltersMaskRule(m metrics.Metrics, comp *ast.Compiler, path string, query ast.Body) (ast.Ref, []*ast.Error) { + key := path + mr, ok := s.compileMaskingRulesCache.Get(key) + if ok { + return mr, nil + } + + m.Timer(timerExtractAnnotationsMask).Start() + if len(query) != 1 { + return nil, nil + } + q, ok := query[0].Terms.(*ast.Term) + if !ok { + return nil, nil + } + queryRef, ok := q.Value.(ast.Ref) + if !ok { + return nil, nil + } + parsedMaskingRule, err := compile.ExtractMaskRuleRefFromAnnotations(comp, queryRef) + if err != nil { + return parsedMaskingRule, []*ast.Error{err} + } + mr = parsedMaskingRule + m.Timer(timerExtractAnnotationsMask).Stop() + + s.compileMaskingRulesCache.Add(key, parsedMaskingRule) + return mr, nil +} + +func readInputCompileFiltersV1(comp *ast.Compiler, reqBytes []byte, urlPath string, queryParserOptions ast.ParserOptions) (*CompileFiltersRequestV1, *compileFiltersRequest, *types.ErrorV1) { + var request CompileFiltersRequestV1 + + if len(reqBytes) > 0 { + if err := util.NewJSONDecoder(bytes.NewBuffer(reqBytes)).Decode(&request); err != nil { + return nil, nil, types.NewErrorV1(types.CodeInvalidParameter, "error(s) occurred while decoding request: %v", err.Error()) + } + } + + var query ast.Body + var err error + if urlPath != "" { + v, err := stringPathToDataRef(urlPath) + if err != nil { + return nil, nil, types.NewErrorV1(types.CodeInvalidParameter, "invalid path: %v", err) + } + query = []*ast.Expr{ast.NewExpr(ast.NewTerm(v))} + } else { // attempt to parse query + query, err = ast.ParseBodyWithOpts(request.Query, queryParserOptions) + if err != nil { + switch err := err.(type) { + case ast.Errors: + return nil, nil, types.NewErrorV1(types.CodeInvalidParameter, types.MsgParseQueryError).WithASTErrors(err) + default: + return nil, nil, types.NewErrorV1(types.CodeInvalidParameter, "%v: %v", types.MsgParseQueryError, err) + } + } + } + + var input ast.Value + if request.Input != nil { + input, err = ast.InterfaceToValue(*request.Input) + if err != nil { + return nil, nil, types.NewErrorV1(types.CodeInvalidParameter, "error(s) occurred while converting input: %v", err) + } + } + + var unknowns []ast.Ref + if request.Unknowns != nil { + unknowns = make([]ast.Ref, len(*request.Unknowns)) + for i, s := range *request.Unknowns { + unknowns[i], err = ast.ParseRef(s) + if err != nil { + return nil, nil, types.NewErrorV1(types.CodeInvalidParameter, "error(s) occurred while parsing unknowns: %v", err) + } + } + } + + var maskRuleRef ast.Ref + if request.Options.MaskRule != "" { + maskPath := request.Options.MaskRule + if !strings.HasPrefix(request.Options.MaskRule, "data.") { + // If the mask_rule is not a data ref try adding package prefix from URL path. 
+ dataFiltersRuleRef, _ := stringPathToDataRef(urlPath) + maskPath = dataFiltersRuleRef[:len(dataFiltersRuleRef)-1].String() + "." + request.Options.MaskRule + } + maskRuleRef, err = ast.ParseRef(maskPath) + if err != nil { + hint := compile.FuzzyRuleNameMatchHint(comp, request.Options.MaskRule) + return nil, nil, types.NewErrorV1(types.CodeInvalidParameter, "error(s) occurred while parsing mask_rule name: %s", hint) + } + } + + return &request, &compileFiltersRequest{ + Query: query, + Input: input, + Unknowns: unknowns, + Options: compileFiltersRequestOptions{ + MaskRule: maskRuleRef, + }, + }, nil +} + +func fin(w http.ResponseWriter, + result CompileResponseV1, + contentType string, + metrics metrics.Metrics, + includeMetrics, includeInstrumentation, pretty bool, +) { + if includeMetrics || includeInstrumentation { + result.Metrics = metrics.All() + } + + enc := json.NewEncoder(w) + if pretty { + enc.SetIndent("", " ") + } + + w.Header().Add("Content-Type", contentType) + // If Encode() calls w.Write() for the first time, it'll set the HTTP status + // to 200 OK. + if err := enc.Encode(result); err != nil { + writer.ErrorAuto(w, err) + return + } +} + +func sanitizeHeader(accept string) (string, error) { + if accept == "" { + return "", errors.New("missing required header") + } + + if strings.Contains(accept, ",") { + return "", errors.New("multiple headers not supported") + } + + if !slices.Contains(allKnownHeaders, accept) { + return "", fmt.Errorf("unsupported header: %s", accept) + } + + return accept, nil +} + +func targetDialect(accept string) (string, string) { + switch accept { + case applicationJSON: + return "", "" + case multiTargetJSON: + return "multi", "" + case ucastAllJSON: + return "ucast", "all" + case ucastMinimalJSON: + return "ucast", "minimal" + case ucastPrismaJSON: + return "ucast", "prisma" + case ucastLINQJSON: + return "ucast", "linq" + case sqlPostgresJSON: + return "sql", "postgresql" + case sqlMySQLJSON: + return "sql", "mysql" + case sqlSQLServerJSON: + return "sql", "sqlserver" + case sqliteJSON: + return "sql", "sqlite" + } + + panic("unreachable") +} + +func cloneCompiler(c *ast.Compiler) *ast.Compiler { + return ast.NewCompiler(). + WithDefaultRegoVersion(c.DefaultRegoVersion()). + WithCapabilities(c.Capabilities()) +} + +func prepareAnnotations( + ctx context.Context, + comp *ast.Compiler, + store storage.Store, + txn storage.Transaction, + po ast.ParserOptions, +) (*ast.Compiler, ast.Errors) { + var errs []*ast.Error + mods, err := store.ListPolicies(ctx, txn) + if err != nil { + return nil, append(errs, ast.NewError(invalidUnknownCode, nil, "failed to list policies for annotation set: %s", err)) + } + po.ProcessAnnotation = true + po.RegoVersion = comp.DefaultRegoVersion() + modules := make(map[string]*ast.Module, len(mods)) + for _, module := range mods { + vsn, ok := comp.Modules[module] + if ok { // NB(sr): I think this should be impossible. Let's try not to panic, and fall back to the default if it _does happen_. 
+ po.RegoVersion = vsn.RegoVersion() + } + rego, err := store.GetPolicy(ctx, txn, module) + if err != nil { + return nil, append(errs, ast.NewError(invalidUnknownCode, nil, "failed to read module for annotation set: %s", err)) + } + m, err := ast.ParseModuleWithOpts(module, string(rego), po) + if err != nil { + errs = append(errs, ast.NewError(invalidUnknownCode, nil, "failed to parse module for annotation set: %s", err)) + continue + } + modules[module] = m + } + if errs != nil { + return nil, errs + } + + comp0 := cloneCompiler(comp) + comp0.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn)).Compile(modules) + if len(comp0.Errors) > 0 { + return nil, comp0.Errors + } + return comp0, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/server/failtracer/failtracer.go b/vendor/github.com/open-policy-agent/opa/v1/server/failtracer/failtracer.go new file mode 100644 index 000000000..e97292d54 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/server/failtracer/failtracer.go @@ -0,0 +1,119 @@ +package failtracer + +import ( + "fmt" + "slices" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown" + + "github.com/open-policy-agent/opa/internal/levenshtein" +) + +const ( + // maxDistanceForHint is the levenshtein distance below which we'll emit a hint + maxDistanceForHint = 3 +) + +type Hint struct { + Message string `json:"message"` + Location *ast.Location `json:"location,omitempty"` +} + +type failTracer struct { + exprs []*ast.Expr +} + +type FailTracer interface { + Enabled() bool + TraceEvent(topdown.Event) + Config() topdown.TraceConfig + Hints([]ast.Ref) []Hint +} + +func New() FailTracer { + return &failTracer{} +} + +// Enabled always returns true if the failTracer is instantiated. +func (b *failTracer) Enabled() bool { + return b != nil +} + +func (b *failTracer) TraceEvent(evt topdown.Event) { + if evt.Op == topdown.FailOp { + expr, ok := evt.Node.(*ast.Expr) + if ok { + b.exprs = append(b.exprs, expr) + } + } +} + +// Config returns the Tracers standard configuration +func (*failTracer) Config() topdown.TraceConfig { + return topdown.TraceConfig{PlugLocalVars: true} +} + +func (b *failTracer) Hints(unknowns []ast.Ref) []Hint { + var hints []Hint //nolint:prealloc + seenRefs := map[string]struct{}{} + candidates := make([]string, 0, len(unknowns)) + for _, ref := range unknowns { + if len(ref) < 2 { + continue + } + candidates = append(candidates, string(ref[1].Value.(ast.String))) + } + + for _, expr := range b.exprs { + var ref ast.Ref // when this is processed, only one input.X.Y ref is in the expression (SSA) + switch { + case expr.IsCall(): + for i := range 2 { + op := expr.Operand(i) + if r, ok := op.Value.(ast.Ref); ok && r.HasPrefix(ast.InputRootRef) { + ref = r + } + } + } + // NOTE(sr): if we allow naked ast.Term for filter policies, they need to be handled in switch ^ + + if len(ref) < 2 { + continue + } + tblPart, ok := ref[1].Value.(ast.String) + if !ok { + continue + } + miss := string(tblPart) + rs := ref[1:].String() + if _, ok := seenRefs[rs]; ok { + continue + } + + closestStrings := levenshtein.ClosestStrings(maxDistanceForHint, miss, slices.Values(candidates)) + proposals := make([]ast.Ref, len(closestStrings)) + for i := range closestStrings { + prop := make([]*ast.Term, 2, len(ref)) + prop[0] = ast.InputRootDocument + prop[1] = ast.StringTerm(closestStrings[i]) + prop = append(prop, ref[2:]...) 
+ proposals[i] = prop + } + var msg string + switch len(proposals) { + case 0: + continue + case 1: + msg = fmt.Sprintf("%v undefined, did you mean %s?", ref, proposals[0]) + default: + msg = fmt.Sprintf("%v undefined, did you mean any of %v?", ref, proposals) + } + hints = append(hints, Hint{ + Location: expr.Loc(), + Message: msg, + }) + seenRefs[rs] = struct{}{} + } + return hints +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/server/server.go b/vendor/github.com/open-policy-agent/opa/v1/server/server.go index 971ac28ec..456e3c90e 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/server/server.go +++ b/vendor/github.com/open-policy-agent/opa/v1/server/server.go @@ -25,17 +25,16 @@ import ( "sync" "time" - "github.com/open-policy-agent/opa/v1/hooks" - + lru "github.com/hashicorp/golang-lru/v2" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" "github.com/open-policy-agent/opa/internal/json/patch" "github.com/open-policy-agent/opa/v1/ast" "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/hooks" "github.com/open-policy-agent/opa/v1/logging" "github.com/open-policy-agent/opa/v1/metrics" "github.com/open-policy-agent/opa/v1/plugins" @@ -153,6 +152,9 @@ type Server struct { unixSocketPerm *string cipherSuites *[]uint16 hooks hooks.Hooks + + compileUnknownsCache *lru.Cache[string, []ast.Ref] + compileMaskingRulesCache *lru.Cache[string, ast.Ref] } // Metrics defines the interface that the server requires for recording HTTP @@ -185,6 +187,8 @@ type Loop func() error // New returns a new Server. func New() *Server { s := Server{} + s.compileUnknownsCache, _ = lru.New[string, []ast.Ref](unknownsCacheSize) + s.compileMaskingRulesCache, _ = lru.New[string, ast.Ref](maskingRuleCacheSize) return &s } @@ -884,6 +888,8 @@ func (s *Server) initRouters(ctx context.Context) { mainRouter.Handle("GET /v1/query", s.instrumentHandler(s.v1QueryGet, PromHandlerV1Query)) mainRouter.Handle("POST /v1/query", s.instrumentHandler(s.v1QueryPost, PromHandlerV1Query)) mainRouter.Handle("POST /v1/compile", s.instrumentHandler(s.v1CompilePost, PromHandlerV1Compile)) + mainRouter.Handle("POST /v1/compile/{path...}", s.instrumentHandler(s.v1CompileFilters, PromHandlerV1Compile)) + mainRouter.Handle("GET /v1/compile/{path...}", s.instrumentHandler(s.v1CompileFilters, PromHandlerV1Compile)) mainRouter.Handle("GET /v1/config", s.instrumentHandler(s.v1ConfigGet, PromHandlerV1Config)) mainRouter.Handle("GET /v1/status", s.instrumentHandler(s.v1StatusGet, PromHandlerV1Status)) mainRouter.Handle("POST /{$}", s.instrumentHandler(s.unversionedPost, PromHandlerIndex)) @@ -974,7 +980,7 @@ func (s *Server) execQuery(ctx context.Context, br bundleRevisions, txn storage. output, err := rego.Eval(ctx) if err != nil { - _ = logger.Log(ctx, txn, "", parsedQuery.String(), rawInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, "", parsedQuery.String(), rawInput, input, nil, ndbCache, err, m, nil) return nil, err } @@ -991,7 +997,7 @@ func (s *Server) execQuery(ctx context.Context, br bundleRevisions, txn storage. 
} var x any = results.Result - if err := logger.Log(ctx, txn, "", parsedQuery.String(), rawInput, input, &x, ndbCache, nil, m); err != nil { + if err := logger.Log(ctx, txn, "", parsedQuery.String(), rawInput, input, &x, ndbCache, nil, m, nil); err != nil { return nil, err } return &results, nil @@ -1044,7 +1050,7 @@ func getRevisions(ctx context.Context, store storage.Store, txn storage.Transact return br, nil } -func (s *Server) reload(context.Context, storage.Transaction, storage.TriggerEvent) { +func (s *Server) reload(_ context.Context, _ storage.Transaction, evt storage.TriggerEvent) { // NOTE(tsandall): We currently rely on the storage txn to provide // critical sections in the server. // @@ -1056,6 +1062,10 @@ func (s *Server) reload(context.Context, storage.Transaction, storage.TriggerEve s.partials = map[string]rego.PartialResult{} s.preparedEvalQueries = newCache(pqMaxCacheSize) s.defaultDecisionPath = s.generateDefaultDecisionPath() + if evt.PolicyChanged() { + s.compileUnknownsCache.Purge() + s.compileMaskingRulesCache.Purge() + } } func (s *Server) unversionedPost(w http.ResponseWriter, r *http.Request) { @@ -1124,14 +1134,14 @@ func (s *Server) v0QueryPath(w http.ResponseWriter, r *http.Request, urlPath str rego, err := s.makeRego(ctx, false, txn, input, urlPath, m, false, nil, opts) if err != nil { - _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m, nil) writer.ErrorAuto(w, err) return } pq, err := rego.PrepareForEval(ctx) if err != nil { - _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m, nil) writer.ErrorAuto(w, err) return } @@ -1157,7 +1167,7 @@ func (s *Server) v0QueryPath(w http.ResponseWriter, r *http.Request, urlPath str // Handle results. 
if err != nil { - _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m, nil) writer.ErrorAuto(w, err) return } @@ -1174,7 +1184,7 @@ func (s *Server) v0QueryPath(w http.ResponseWriter, r *http.Request, urlPath str messageType = types.MsgFoundUndefinedError } errV1 := types.NewErrorV1(types.CodeUndefinedDocument, "%v: %v", messageType, ref) - if err := logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, errV1, m); err != nil { + if err := logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, errV1, m, nil); err != nil { writer.ErrorAuto(w, err) return } @@ -1182,7 +1192,7 @@ func (s *Server) v0QueryPath(w http.ResponseWriter, r *http.Request, urlPath str writer.Error(w, http.StatusNotFound, errV1) return } - err = logger.Log(ctx, txn, urlPath, "", goInput, input, &rs[0].Expressions[0].Value, ndbCache, nil, m) + err = logger.Log(ctx, txn, urlPath, "", goInput, input, &rs[0].Expressions[0].Value, ndbCache, nil, m, nil) if err != nil { writer.ErrorAuto(w, err) return @@ -1551,14 +1561,14 @@ func (s *Server) v1DataGet(w http.ResponseWriter, r *http.Request) { rego, err := s.makeRego(ctx, strictBuiltinErrors, txn, input, urlPath, m, includeInstrumentation, buf, opts) if err != nil { - _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m, nil) writer.ErrorAuto(w, err) return } pq, err := rego.PrepareForEval(ctx) if err != nil { - _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m, nil) writer.ErrorAuto(w, err) return } @@ -1586,7 +1596,7 @@ func (s *Server) v1DataGet(w http.ResponseWriter, r *http.Request) { // Handle results. 
if err != nil { - _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m, nil) writer.ErrorAuto(w, err) return } @@ -1612,7 +1622,7 @@ func (s *Server) v1DataGet(w http.ResponseWriter, r *http.Request) { } } - if err := logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, nil, m); err != nil { + if err := logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, nil, m, nil); err != nil { writer.ErrorAuto(w, err) return } @@ -1626,7 +1636,7 @@ func (s *Server) v1DataGet(w http.ResponseWriter, r *http.Request) { result.Explanation = s.getExplainResponse(explainMode, *buf, pretty(r)) } - if err := logger.Log(ctx, txn, urlPath, "", goInput, input, result.Result, ndbCache, nil, m); err != nil { + if err := logger.Log(ctx, txn, urlPath, "", goInput, input, result.Result, ndbCache, nil, m, nil); err != nil { writer.ErrorAuto(w, err) return } @@ -1778,14 +1788,14 @@ func (s *Server) v1DataPost(w http.ResponseWriter, r *http.Request) { rego, err := s.makeRego(ctx, strictBuiltinErrors, txn, input, urlPath, m, includeInstrumentation, buf, opts) if err != nil { - _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m, nil) writer.ErrorAuto(w, err) return } pq, err := rego.PrepareForEval(ctx) if err != nil { - _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m, nil) writer.ErrorAuto(w, err) return } @@ -1808,7 +1818,7 @@ func (s *Server) v1DataPost(w http.ResponseWriter, r *http.Request) { // Handle results. if err != nil { - _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m) + _ = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, err, m, nil) writer.ErrorAuto(w, err) return } @@ -1837,7 +1847,7 @@ func (s *Server) v1DataPost(w http.ResponseWriter, r *http.Request) { return } } - if err = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, nil, m); err != nil { + if err = logger.Log(ctx, txn, urlPath, "", goInput, input, nil, ndbCache, nil, m, nil); err != nil { writer.ErrorAuto(w, err) return } @@ -1851,7 +1861,7 @@ func (s *Server) v1DataPost(w http.ResponseWriter, r *http.Request) { result.Explanation = s.getExplainResponse(explainMode, *buf, pretty(r)) } - if err := logger.Log(ctx, txn, urlPath, "", goInput, input, result.Result, ndbCache, nil, m); err != nil { + if err := logger.Log(ctx, txn, urlPath, "", goInput, input, result.Result, ndbCache, nil, m, nil); err != nil { writer.ErrorAuto(w, err) return } @@ -3069,6 +3079,7 @@ func (l decisionLogger) Log( ndbCache builtins.NDBCache, err error, m metrics.Metrics, + custom map[string]any, ) error { if l.logger == nil { return nil @@ -3108,6 +3119,7 @@ func (l decisionLogger) Log( Error: err, Metrics: m, RequestID: rctx.ReqID, + Custom: custom, } if ndbCache != nil { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go index f0f301e6a..4b3a9ba26 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go @@ -1646,12 +1646,11 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error) var result *ast.IndexResult var err error + 
resolver.e = e if e.indexing { - resolver.e = e resolver.args = args result, err = index.Lookup(resolver) } else { - resolver.e = e result, err = index.AllRules(resolver) } if err != nil { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go index 8539a9e0d..f3bdf8e41 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go @@ -675,7 +675,7 @@ const gqlCacheName = "graphql" func init() { - var defaultCacheEntries int = 10 + var defaultCacheEntries = 10 var graphqlCacheConfig = cache.NamedValueCacheConfig{ MaxNumEntries: &defaultCacheEntries, } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go index 36fa1572e..f7dc3dc2e 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go @@ -99,6 +99,7 @@ var ( requiredKeys = ast.NewSet(ast.InternedTerm("method"), ast.InternedTerm("url")) httpSendLatencyMetricKey = "rego_builtin_http_send" httpSendInterQueryCacheHits = httpSendLatencyMetricKey + "_interquery_cache_hits" + httpSendNetworkRequests = httpSendLatencyMetricKey + "_network_requests" ) type httpSendKey string @@ -1535,6 +1536,9 @@ func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) { return nil, handleHTTPSendErr(c.bctx, err) } + // Increment counter for actual network requests + c.bctx.Metrics.Counter(httpSendNetworkRequests).Incr() + return executeHTTPRequest(c.httpReq, c.httpClient, c.req) } @@ -1586,6 +1590,10 @@ func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) { if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } + + // Increment counter for actual network requests + c.bctx.Metrics.Counter(httpSendNetworkRequests).Incr() + return executeHTTPRequest(httpReq, httpClient, c.req) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go index a3f8f0854..1e05a247a 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go @@ -15,8 +15,10 @@ import ( type randIntCachingKey string -var zero = big.NewInt(0) -var one = big.NewInt(1) +var ( + zero = big.NewInt(0) + one = big.NewInt(1) +) func builtinNumbersRange(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { if canGenerateCheapRange(operands) { @@ -45,8 +47,9 @@ func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter fun if canGenerateCheapRangeStep(operands) { step, _ := builtins.IntOperand(operands[2].Value, 3) if step <= 0 { - return errors.New("numbers.range_step: step must be a positive number above zero") + return errors.New("numbers.range_step: step must be a positive integer") } + return generateCheapRange(operands, step, iter) } @@ -66,7 +69,7 @@ func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter fun } if step.Cmp(zero) <= 0 { - return errors.New("numbers.range_step: step must be a positive number above zero") + return errors.New("numbers.range_step: step must be a positive integer") } ast, err := generateRange(bctx, x, 
y, step, "numbers.range_step") @@ -158,11 +161,9 @@ func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, f } func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - strOp, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err - } n, err := builtins.IntOperand(operands[1].Value, 2) @@ -178,7 +179,7 @@ func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.T n = -n } - var key = randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n)) + key := randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n)) if val, ok := bctx.Cache.Get(key); ok { return iter(val.(*ast.Term)) diff --git a/vendor/github.com/open-policy-agent/opa/v1/version/version.go b/vendor/github.com/open-policy-agent/opa/v1/version/version.go index ea6d25bbb..f8644aaba 100644 --- a/vendor/github.com/open-policy-agent/opa/v1/version/version.go +++ b/vendor/github.com/open-policy-agent/opa/v1/version/version.go @@ -10,7 +10,7 @@ import ( "runtime/debug" ) -var Version = "1.8.0" +var Version = "1.9.0" // GoVersion is the version of Go this was built with var GoVersion = runtime.Version() diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0ed55c2ba..4de21512f 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))Error Parsing File +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PROMU := $(FIRST_GOPATH)/bin/promu @@ -61,7 +61,8 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v2.0.2 +GOLANGCI_LINT_VERSION ?= v2.1.5 +GOLANGCI_FMT_OPTS ?= # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -156,9 +157,13 @@ $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format -common-format: +common-format: $(GOLANGCI_LINT) @echo ">> formatting code" $(GO) fmt $(pkgs) +ifdef GOLANGCI_LINT + @echo ">> formatting code with golangci-lint" + $(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS) +endif .PHONY: common-vet common-vet: @@ -248,8 +253,8 @@ $(PROMU): cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) -.PHONY: proto -proto: +.PHONY: common-proto +common-proto: @echo ">> generating code from proto files" @./scripts/genproto.sh diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 67a9d2b44..1fd4381b2 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -123,13 +123,16 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { finish := float64(0) pct := float64(0) recovering := strings.Contains(lines[syncLineIdx], "recovery") + reshaping := strings.Contains(lines[syncLineIdx], "reshape") resyncing := strings.Contains(lines[syncLineIdx], "resync") checking := strings.Contains(lines[syncLineIdx], "check") // Append recovery and resyncing state info. - if recovering || resyncing || checking { + if recovering || resyncing || checking || reshaping { if recovering { state = "recovering" + } else if reshaping { + state = "reshaping" } else if checking { state = "checking" } else { diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index 4b2c4050a..937e1f960 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -66,6 +66,10 @@ type Meminfo struct { // Memory which has been evicted from RAM, and is temporarily // on the disk SwapFree *uint64 + // Memory consumed by the zswap backend (compressed size) + Zswap *uint64 + // Amount of anonymous memory stored in zswap (original size) + Zswapped *uint64 // Memory which is waiting to get written back to the disk Dirty *uint64 // Memory which is actively being written back to the disk @@ -85,6 +89,8 @@ type Meminfo struct { // amount of memory dedicated to the lowest level of page // tables. PageTables *uint64 + // secondary page tables. 
+ SecPageTables *uint64 // NFS pages sent to the server, but not yet committed to // stable storage NFSUnstable *uint64 @@ -129,15 +135,18 @@ type Meminfo struct { Percpu *uint64 HardwareCorrupted *uint64 AnonHugePages *uint64 + FileHugePages *uint64 ShmemHugePages *uint64 ShmemPmdMapped *uint64 CmaTotal *uint64 CmaFree *uint64 + Unaccepted *uint64 HugePagesTotal *uint64 HugePagesFree *uint64 HugePagesRsvd *uint64 HugePagesSurp *uint64 Hugepagesize *uint64 + Hugetlb *uint64 DirectMap4k *uint64 DirectMap2M *uint64 DirectMap1G *uint64 @@ -161,6 +170,8 @@ type Meminfo struct { MlockedBytes *uint64 SwapTotalBytes *uint64 SwapFreeBytes *uint64 + ZswapBytes *uint64 + ZswappedBytes *uint64 DirtyBytes *uint64 WritebackBytes *uint64 AnonPagesBytes *uint64 @@ -171,6 +182,7 @@ type Meminfo struct { SUnreclaimBytes *uint64 KernelStackBytes *uint64 PageTablesBytes *uint64 + SecPageTablesBytes *uint64 NFSUnstableBytes *uint64 BounceBytes *uint64 WritebackTmpBytes *uint64 @@ -182,11 +194,14 @@ type Meminfo struct { PercpuBytes *uint64 HardwareCorruptedBytes *uint64 AnonHugePagesBytes *uint64 + FileHugePagesBytes *uint64 ShmemHugePagesBytes *uint64 ShmemPmdMappedBytes *uint64 CmaTotalBytes *uint64 CmaFreeBytes *uint64 + UnacceptedBytes *uint64 HugepagesizeBytes *uint64 + HugetlbBytes *uint64 DirectMap4kBytes *uint64 DirectMap2MBytes *uint64 DirectMap1GBytes *uint64 @@ -287,6 +302,12 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "SwapFree:": m.SwapFree = &val m.SwapFreeBytes = &valBytes + case "Zswap:": + m.Zswap = &val + m.ZswapBytes = &valBytes + case "Zswapped:": + m.Zswapped = &val + m.ZswappedBytes = &valBytes case "Dirty:": m.Dirty = &val m.DirtyBytes = &valBytes @@ -317,6 +338,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "PageTables:": m.PageTables = &val m.PageTablesBytes = &valBytes + case "SecPageTables:": + m.SecPageTables = &val + m.SecPageTablesBytes = &valBytes case "NFS_Unstable:": m.NFSUnstable = &val m.NFSUnstableBytes = &valBytes @@ -350,6 +374,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "AnonHugePages:": m.AnonHugePages = &val m.AnonHugePagesBytes = &valBytes + case "FileHugePages:": + m.FileHugePages = &val + m.FileHugePagesBytes = &valBytes case "ShmemHugePages:": m.ShmemHugePages = &val m.ShmemHugePagesBytes = &valBytes @@ -362,6 +389,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { case "CmaFree:": m.CmaFree = &val m.CmaFreeBytes = &valBytes + case "Unaccepted:": + m.Unaccepted = &val + m.UnacceptedBytes = &valBytes case "HugePages_Total:": m.HugePagesTotal = &val case "HugePages_Free:": @@ -373,6 +403,9 @@ case "Hugepagesize:": m.Hugepagesize = &val m.HugepagesizeBytes = &valBytes + case "Hugetlb:": + m.Hugetlb = &val + m.HugetlbBytes = &valBytes case "DirectMap4k:": m.DirectMap4k = &val m.DirectMap4kBytes = &valBytes diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 06a8d931c..3328556bd 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -101,6 +101,12 @@ type ProcStat struct { RSS int // Soft limit in bytes on the rss of the process. RSSLimit uint64 + // The address above which program text can run. + StartCode uint64 + // The address below which program text can run. + EndCode uint64 + // The address of the start (i.e., bottom) of the stack. 
+ StartStack uint64 // CPU number last executed on. Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes @@ -177,9 +183,9 @@ func (p Proc) Stat() (ProcStat, error) { &s.VSize, &s.RSS, &s.RSSLimit, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, + &s.StartCode, + &s.EndCode, + &s.StartStack, &ignoreUint64, &ignoreUint64, &ignoreUint64, diff --git a/vendor/github.com/prometheus/procfs/proc_statm.go b/vendor/github.com/prometheus/procfs/proc_statm.go new file mode 100644 index 000000000..ed5798424 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_statm.go @@ -0,0 +1,116 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// - https://man7.org/linux/man-pages/man5/proc_pid_statm.5.html + +// ProcStatm provides memory usage information for a process, measured in memory pages. +// Read from /proc/[pid]/statm. +type ProcStatm struct { + // The process ID. + PID int + // total program size (same as VmSize in status) + Size uint64 + // resident set size (same as VmRSS in status) + Resident uint64 + // number of resident shared pages (i.e., backed by a file) + Shared uint64 + // text (code) + Text uint64 + // library (unused since Linux 2.6; always 0) + Lib uint64 + // data + stack + Data uint64 + // dirty pages (unused since Linux 2.6; always 0) + Dt uint64 +} + +// NewStatm returns the current memory usage information of the process. +// Deprecated: Use p.Statm() instead. +func (p Proc) NewStatm() (ProcStatm, error) { + return p.Statm() +} + +// Statm returns the current memory usage information of the process. +func (p Proc) Statm() (ProcStatm, error) { + data, err := util.ReadFileNoStat(p.path("statm")) + if err != nil { + return ProcStatm{}, err + } + + statmSlice, err := parseStatm(data) + if err != nil { + return ProcStatm{}, err + } + + procStatm := ProcStatm{ + PID: p.PID, + Size: statmSlice[0], + Resident: statmSlice[1], + Shared: statmSlice[2], + Text: statmSlice[3], + Lib: statmSlice[4], + Data: statmSlice[5], + Dt: statmSlice[6], + } + + return procStatm, nil +} + +// parseStatm parses /proc/[pid]/statm data into a uint64 slice. +func parseStatm(data []byte) ([]uint64, error) { + var statmSlice []uint64 + statmItems := strings.Fields(string(data)) + for i := 0; i < len(statmItems); i++ { + statmItem, err := strconv.ParseUint(statmItems[i], 10, 64) + if err != nil { + return nil, err + } + statmSlice = append(statmSlice, statmItem) + } + return statmSlice, nil +} + +// SizeBytes returns the total program size of the process in bytes. +func (s ProcStatm) SizeBytes() uint64 { + return s.Size * uint64(os.Getpagesize()) +} + +// ResidentBytes returns the resident set size of the process in bytes. 
+func (s ProcStatm) ResidentBytes() uint64 { + return s.Resident * uint64(os.Getpagesize()) +} + +// SHRBytes returns the shared memory size of the process in bytes. +func (s ProcStatm) SHRBytes() uint64 { + return s.Shared * uint64(os.Getpagesize()) +} + +// TextBytes returns the text (code) size of the process in bytes. +func (s ProcStatm) TextBytes() uint64 { + return s.Text * uint64(os.Getpagesize()) +} + +// DataBytes returns the data + stack size of the process in bytes. +func (s ProcStatm) DataBytes() uint64 { + return s.Data * uint64(os.Getpagesize()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md index 27ddfee8b..6492bfe85 100644 --- a/vendor/github.com/rcrowley/go-metrics/README.md +++ b/vendor/github.com/rcrowley/go-metrics/README.md @@ -7,6 +7,15 @@ Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>. Documentation: <https://godoc.org/github.com/rcrowley/go-metrics>. +Archived as of April 1 2025 +----- +This repository is no longer maintained. The authors recommend you explore the +following newer, more widely adopted libraries for your Go instrumentation +needs: + +* [OpenTelemetry Go SDK](https://opentelemetry.io/docs/languages/go/instrumentation/#metrics) +* [Prometheus Go Client Library](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) + Usage ----- diff --git a/vendor/github.com/sagikazarmark/locafero/.envrc b/vendor/github.com/sagikazarmark/locafero/.envrc index 2e0f9f5f7..5c95dc798 100644 --- a/vendor/github.com/sagikazarmark/locafero/.envrc +++ b/vendor/github.com/sagikazarmark/locafero/.envrc @@ -1,4 +1,4 @@ -if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +if ! has nix_direnv_version || ! nix_direnv_version 3.1.0; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.1.0/direnvrc" "sha256-yMJ2OVMzrFaDPn7q8nCBZFRYpL/f0RcHzhmw/i6btJM=" fi use flake . 
--impure diff --git a/vendor/github.com/sagikazarmark/locafero/.golangci.yaml b/vendor/github.com/sagikazarmark/locafero/.golangci.yaml index 829de2a4a..a27a42959 100644 --- a/vendor/github.com/sagikazarmark/locafero/.golangci.yaml +++ b/vendor/github.com/sagikazarmark/locafero/.golangci.yaml @@ -1,27 +1,37 @@ +version: "2" + run: timeout: 10m -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/sagikazarmark/locafero) - goimports: - local-prefixes: github.com/sagikazarmark/locafero - misspell: - locale: US - nolintlint: - allow-leading-space: false # require machine-readable nolint directives (with no leading space) - allow-unused: false # report any unused nolint directives - require-specific: false # don't require nolint directives to be specific about which linter is being skipped - revive: - confidence: 0 - linters: enable: - - gci - - goimports + - errcheck + - govet + - ineffassign - misspell - nolintlint - revive + - staticcheck + - unused + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + - golines + + settings: + gci: + sections: + - standard + - default + - localmodule diff --git a/vendor/github.com/sagikazarmark/locafero/README.md b/vendor/github.com/sagikazarmark/locafero/README.md index a48e8e978..d25fe80f3 100644 --- a/vendor/github.com/sagikazarmark/locafero/README.md +++ b/vendor/github.com/sagikazarmark/locafero/README.md @@ -2,8 +2,8 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/locafero/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml) [![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) -[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/sagikazarmark/locafero?style=flat-square&color=61CFDD) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/sagikazarmark/locafero/badge?style=flat-square)](https://deps.dev/go/github.com%252Fsagikazarmark%252Flocafero) **Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).** diff --git a/vendor/github.com/sagikazarmark/locafero/file_type.go b/vendor/github.com/sagikazarmark/locafero/file_type.go index 9a9b14023..5ea57c93e 100644 --- a/vendor/github.com/sagikazarmark/locafero/file_type.go +++ b/vendor/github.com/sagikazarmark/locafero/file_type.go @@ -5,19 +5,23 @@ import "io/fs" // FileType represents the kind of entries [Finder] can return. type FileType int +// FileType represents the kind of entries [Finder] can return. 
const ( - FileTypeAll FileType = iota + FileTypeAny FileType = iota FileTypeFile FileTypeDir + + // Deprecated: Use [FileTypeAny] instead. + FileTypeAll = FileTypeAny ) -func (ft FileType) matchFileInfo(info fs.FileInfo) bool { +func (ft FileType) match(info fs.FileInfo) bool { switch ft { - case FileTypeAll: + case FileTypeAny: return true case FileTypeFile: - return !info.IsDir() + return info.Mode().IsRegular() case FileTypeDir: return info.IsDir() diff --git a/vendor/github.com/sagikazarmark/locafero/finder.go b/vendor/github.com/sagikazarmark/locafero/finder.go index ef8d54712..ce43c7826 100644 --- a/vendor/github.com/sagikazarmark/locafero/finder.go +++ b/vendor/github.com/sagikazarmark/locafero/finder.go @@ -1,4 +1,4 @@ -// Package finder looks for files and directories in an {fs.Fs} filesystem. +// Package locafero looks for files and directories in an {fs.Fs} filesystem. package locafero import ( @@ -7,7 +7,7 @@ import ( "path/filepath" "strings" - "github.com/sourcegraph/conc/iter" + "github.com/sourcegraph/conc/pool" "github.com/spf13/afero" ) @@ -44,65 +44,66 @@ type Finder struct { // Find looks for files and directories in an [afero.Fs] filesystem. func (f Finder) Find(fsys afero.Fs) ([]string, error) { // Arbitrary go routine limit (TODO: make this a parameter) - // pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError() + p := pool.NewWithResults[[]searchResult]().WithMaxGoroutines(5).WithErrors().WithFirstError() - type searchItem struct { - path string - name string + for _, searchPath := range f.Paths { + for _, searchName := range f.Names { + p.Go(func() ([]searchResult, error) { + // If the name contains any glob character, perform a glob match + if strings.ContainsAny(searchName, globMatch) { + return globWalkSearch(fsys, searchPath, searchName, f.Type) + } + + return statSearch(fsys, searchPath, searchName, f.Type) + }) + } } - var searchItems []searchItem + searchResults, err := flatten(p.Wait()) + if err != nil { + return nil, err + } - for _, searchPath := range f.Paths { - searchPath := searchPath + // Return early if no results were found + if len(searchResults) == 0 { + return nil, nil + } - for _, searchName := range f.Names { - searchName := searchName - - searchItems = append(searchItems, searchItem{searchPath, searchName}) - - // pool.Go(func() ([]string, error) { - // // If the name contains any glob character, perform a glob match - // if strings.ContainsAny(searchName, globMatch) { - // return globWalkSearch(fsys, searchPath, searchName, f.Type) - // } - // - // return statSearch(fsys, searchPath, searchName, f.Type) - // }) - } + results := make([]string, 0, len(searchResults)) + + for _, searchResult := range searchResults { + results = append(results, searchResult.path) } - // allResults, err := pool.Wait() - // if err != nil { - // return nil, err - // } + return results, nil +} - allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) { - // If the name contains any glob character, perform a glob match - if strings.ContainsAny(item.name, globMatch) { - return globWalkSearch(fsys, item.path, item.name, f.Type) - } +type searchResult struct { + path string + info fs.FileInfo +} - return statSearch(fsys, item.path, item.name, f.Type) - }) +func flatten[T any](results [][]T, err error) ([]T, error) { if err != nil { return nil, err } - var results []string + var flattened 
[]T - for _, r := range allResults { - results = append(results, r...) + for _, r := range results { + flattened = append(flattened, r...) } - // Sort results in alphabetical order for now - // sort.Strings(results) - - return results, nil + return flattened, nil } -func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { - var results []string +func globWalkSearch( + fsys afero.Fs, + searchPath string, + searchName string, + searchType FileType, +) ([]searchResult, error) { + var results []searchResult err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error { if err != nil { @@ -123,7 +124,7 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT } // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { + if !searchType.match(fileInfo) { return result } @@ -133,7 +134,7 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT } if match { - results = append(results, p) + results = append(results, searchResult{p, fileInfo}) } return result @@ -145,7 +146,12 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT return results, nil } -func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { +func statSearch( + fsys afero.Fs, + searchPath string, + searchName string, + searchType FileType, +) ([]searchResult, error) { filePath := filepath.Join(searchPath, searchName) fileInfo, err := fsys.Stat(filePath) @@ -157,9 +163,9 @@ func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType } // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { + if !searchType.match(fileInfo) { return nil, nil } - return []string{filePath}, nil + return []searchResult{{filePath, fileInfo}}, nil } diff --git a/vendor/github.com/sagikazarmark/locafero/flake.lock b/vendor/github.com/sagikazarmark/locafero/flake.lock index df2a8ccec..b14a842c2 100644 --- a/vendor/github.com/sagikazarmark/locafero/flake.lock +++ b/vendor/github.com/sagikazarmark/locafero/flake.lock @@ -2,30 +2,32 @@ "nodes": { "cachix": { "inputs": { - "devenv": "devenv_2", + "devenv": [ + "devenv" + ], "flake-compat": [ + "devenv" + ], + "git-hooks": [ "devenv", - "flake-compat" + "git-hooks" ], "nixpkgs": [ "devenv", "nixpkgs" - ], - "pre-commit-hooks": [ - "devenv", - "pre-commit-hooks" ] }, "locked": { - "lastModified": 1712055811, - "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "lastModified": 1748883665, + "narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=", "owner": "cachix", "repo": "cachix", - "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "rev": "f707778d902af4d62d8dd92c269f8e70de09acbe", "type": "github" }, "original": { "owner": "cachix", + "ref": "latest", "repo": "cachix", "type": "github" } @@ -33,52 +35,21 @@ "devenv": { "inputs": { "cachix": "cachix", - "flake-compat": "flake-compat_2", - "nix": "nix_2", - "nixpkgs": "nixpkgs_2", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1725907707, - "narHash": "sha256-s3pbtzZmVPHzc86WQjK7MGZMNvvw6hWnFMljEkllAfM=", - "owner": "cachix", - "repo": "devenv", - "rev": "2bbbbc468fc02257265a79652a8350651cca495a", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_2": { - "inputs": { - "flake-compat": [ - "devenv", - "cachix", - 
"flake-compat" - ], + "flake-compat": "flake-compat", + "git-hooks": "git-hooks", "nix": "nix", - "nixpkgs": "nixpkgs", - "poetry2nix": "poetry2nix", - "pre-commit-hooks": [ - "devenv", - "cachix", - "pre-commit-hooks" - ] + "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1708704632, - "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "lastModified": 1753981111, + "narHash": "sha256-uBJOyMxOkGRmxhD2M5rbN2aV6oP1T2AKq5oBaHHC4mw=", "owner": "cachix", "repo": "devenv", - "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "rev": "d4d70df706b153b601a87ab8e81c88a0b1a373b6", "type": "github" }, "original": { "owner": "cachix", - "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -86,27 +57,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1747046372, + "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_2": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", "type": "github" }, "original": { @@ -117,14 +72,18 @@ }, "flake-parts": { "inputs": { - "nixpkgs-lib": "nixpkgs-lib" + "nixpkgs-lib": [ + "devenv", + "nix", + "nixpkgs" + ] }, "locked": { - "lastModified": 1725234343, - "narHash": "sha256-+ebgonl3NbiKD2UD0x4BszCZQ6sTfL4xioaM49o5B3Y=", + "lastModified": 1733312601, + "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "567b938d64d4b4112ee253b9274472dc3a346eb6", + "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", "type": "github" }, "original": { @@ -133,39 +92,47 @@ "type": "github" } }, - "flake-utils": { + "flake-parts_2": { "inputs": { - "systems": "systems" + "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "lastModified": 1753121425, + "narHash": "sha256-TVcTNvOeWWk1DXljFxVRp+E0tzG1LhrVjOGGoMHuXio=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "644e0fc48951a860279da645ba77fe4a6e814c5e", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "hercules-ci", + "repo": "flake-parts", "type": "github" } }, - "flake-utils_2": { + "git-hooks": { "inputs": { - "systems": "systems_2" + "flake-compat": [ + "devenv", + "flake-compat" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ] }, "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "lastModified": 1750779888, + "narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "cachix", + "repo": "git-hooks.nix", "type": "github" } }, 
@@ -173,7 +140,7 @@ "inputs": { "nixpkgs": [ "devenv", - "pre-commit-hooks", + "git-hooks", "nixpkgs" ] }, @@ -192,165 +159,49 @@ } }, "nix": { - "inputs": { - "flake-compat": "flake-compat", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "poetry2nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688870561, - "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, - "nix_2": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], + "flake-parts": "flake-parts", + "git-hooks-nix": [ + "devenv", + "git-hooks" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression_2" + "nixpkgs-23-11": [ + "devenv" + ], + "nixpkgs-regression": [ + "devenv" + ] }, "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", + "lastModified": 1752773918, + "narHash": "sha256-dOi/M6yNeuJlj88exI+7k154z+hAhFcuB8tZktiW7rg=", + "owner": "cachix", "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "rev": "031c3cf42d2e9391eee373507d8c12e0f9606779", "type": "github" }, "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", + "owner": "cachix", + "ref": "devenv-2.30", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1692808169, - "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1725233747, - "narHash": "sha256-Ss8QWLXdr2JCBPcYChJhz4xJm+h/xjl4G0c0XlP6a74=", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" - }, - "original": { - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_2": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": 
"github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1710695816, - "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "614b4613980a522ba49f0d194531beddbb7220d3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1713361204, - "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "lastModified": 1750441195, + "narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=", "owner": "cachix", "repo": "devenv-nixpkgs", - "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "rev": "0ceffe312871b443929ff3006960d29b120dc627", "type": "github" }, "original": { @@ -360,110 +211,42 @@ "type": "github" } }, - "nixpkgs_3": { - "locked": { - "lastModified": 1725910328, - "narHash": "sha256-n9pCtzGZ0httmTwMuEbi5E78UQ4ZbQMr1pzi5N0LAG8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "5775c2583f1801df7b790bf7f7d710a19bac66f4", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "poetry2nix": { - "inputs": { - "flake-utils": "flake-utils", - "nix-github-actions": "nix-github-actions", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ] - }, + "nixpkgs-lib": { "locked": { - "lastModified": 1692876271, - "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "lastModified": 1751159883, + "narHash": "sha256-urW/Ylk9FIfvXfliA1ywh75yszAbiTEVgpPeinFyVZo=", "owner": "nix-community", - "repo": "poetry2nix", - "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "repo": "nixpkgs.lib", + "rev": "14a40a1d7fb9afa4739275ac642ed7301a9ba1ab", "type": "github" }, "original": { "owner": "nix-community", - "repo": "poetry2nix", + "repo": "nixpkgs.lib", "type": "github" } }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils_2", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, + "nixpkgs_2": { "locked": { - "lastModified": 1713775815, - "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "lastModified": 1753939845, + "narHash": "sha256-K2ViRJfdVGE8tpJejs8Qpvvejks1+A4GQej/lBk5y7I=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "94def634a20494ee057c76998843c015909d6311", "type": "github" }, "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", "type": "github" } }, "root": { "inputs": { "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_3" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" + 
"flake-parts": "flake-parts_2", + "nixpkgs": "nixpkgs_2" } } }, diff --git a/vendor/github.com/sagikazarmark/locafero/flake.nix b/vendor/github.com/sagikazarmark/locafero/flake.nix index 312f1ec8c..bdb10dbe4 100644 --- a/vendor/github.com/sagikazarmark/locafero/flake.nix +++ b/vendor/github.com/sagikazarmark/locafero/flake.nix @@ -1,64 +1,42 @@ { - description = "Finder library for Afero"; - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; flake-parts.url = "github:hercules-ci/flake-parts"; devenv.url = "github:cachix/devenv"; }; - outputs = inputs@{ flake-parts, ... }: + outputs = + inputs@{ flake-parts, ... }: flake-parts.lib.mkFlake { inherit inputs; } { imports = [ inputs.devenv.flakeModule ]; - systems = [ "x86_64-linux" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.lib.mkDefault pkgs.go_1_23; - }; - - packages = with pkgs; [ - just - - golangci-lint - ]; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - - ci_1_21 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_21; - }; - }; + systems = [ + "x86_64-linux" + "aarch64-darwin" + ]; - ci_1_22 = { - imports = [ devenv.shells.ci ]; + perSystem = + { pkgs, ... }: + { + devenv.shells = { + default = { + languages = { + go.enable = true; + go.package = pkgs.lib.mkDefault pkgs.go_1_24; + }; - languages = { - go.package = pkgs.go_1_22; - }; - }; + packages = with pkgs; [ + just - ci_1_23 = { - imports = [ devenv.shells.ci ]; + golangci-lint + ]; - languages = { - go.package = pkgs.go_1_23; + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; }; }; }; - }; }; } diff --git a/vendor/github.com/sagikazarmark/locafero/justfile b/vendor/github.com/sagikazarmark/locafero/justfile index 00a88850c..bac5e75db 100644 --- a/vendor/github.com/sagikazarmark/locafero/justfile +++ b/vendor/github.com/sagikazarmark/locafero/justfile @@ -2,10 +2,13 @@ default: just --list test: - go test -race -v ./... + go test -count 10 -shuffle on -race -v ./... + +fuzz: + go test -race -v -fuzz=Fuzz -fuzztime=60s ./... 
lint: golangci-lint run fmt: - golangci-lint run --fix + golangci-lint fmt diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go index 499789984..69956b425 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -1,4 +1,4 @@ -// +build darwin dragonfly freebsd netbsd openbsd +// +build darwin dragonfly freebsd netbsd openbsd hurd // +build !js package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index 04748b851..c9aed267a 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -1,5 +1,7 @@ +//go:build (linux || aix || zos) && !js && !wasi // +build linux aix zos // +build !js +// +build !wasi package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go new file mode 100644 index 000000000..2822b212f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go @@ -0,0 +1,8 @@ +//go:build wasi +// +build wasi + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go new file mode 100644 index 000000000..108a6be12 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go @@ -0,0 +1,8 @@ +//go:build wasip1 +// +build wasip1 + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sourcegraph/conc/Makefile b/vendor/github.com/sourcegraph/conc/Makefile new file mode 100644 index 000000000..3e0720a12 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/Makefile @@ -0,0 +1,24 @@ +.DEFAULT_GOAL := help + +GO_BIN ?= $(shell go env GOPATH)/bin + +.PHONY: help +help: + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +$(GO_BIN)/golangci-lint: + @echo "==> Installing golangci-lint within "${GO_BIN}"" + @go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +.PHONY: lint +lint: $(GO_BIN)/golangci-lint ## Run linting on Go files + @echo "==> Linting Go source files" + @golangci-lint run -v --fix -c .golangci.yml ./... + +.PHONY: test +test: ## Run tests + go test -race -v ./... -coverprofile ./coverage.txt + +.PHONY: bench +bench: ## Run benchmarks. See https://pkg.go.dev/cmd/go#hdr-Testing_flags + go test ./... -bench . 
-benchtime 5s -timeout 0 -run=XXX -cpu 1 -benchmem diff --git a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go deleted file mode 100644 index 7087e32a8..000000000 --- a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !go1.20 -// +build !go1.20 - -package multierror - -import "go.uber.org/multierr" - -var ( - Join = multierr.Combine -) diff --git a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go deleted file mode 100644 index 39cff829a..000000000 --- a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.20 -// +build go1.20 - -package multierror - -import "errors" - -var ( - Join = errors.Join -) diff --git a/vendor/github.com/sourcegraph/conc/iter/iter.go b/vendor/github.com/sourcegraph/conc/iter/iter.go deleted file mode 100644 index 124b4f940..000000000 --- a/vendor/github.com/sourcegraph/conc/iter/iter.go +++ /dev/null @@ -1,85 +0,0 @@ -package iter - -import ( - "runtime" - "sync/atomic" - - "github.com/sourcegraph/conc" -) - -// defaultMaxGoroutines returns the default maximum number of -// goroutines to use within this package. -func defaultMaxGoroutines() int { return runtime.GOMAXPROCS(0) } - -// Iterator can be used to configure the behaviour of ForEach -// and ForEachIdx. The zero value is safe to use with reasonable -// defaults. -// -// Iterator is also safe for reuse and concurrent use. -type Iterator[T any] struct { - // MaxGoroutines controls the maximum number of goroutines - // to use on this Iterator's methods. - // - // If unset, MaxGoroutines defaults to runtime.GOMAXPROCS(0). - MaxGoroutines int -} - -// ForEach executes f in parallel over each element in input. -// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// ForEach always uses at most runtime.GOMAXPROCS goroutines. -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. For -// a configurable goroutine limit, use a custom Iterator. -func ForEach[T any](input []T, f func(*T)) { Iterator[T]{}.ForEach(input, f) } - -// ForEach executes f in parallel over each element in input, -// using up to the Iterator's configured maximum number of -// goroutines. -// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. -func (iter Iterator[T]) ForEach(input []T, f func(*T)) { - iter.ForEachIdx(input, func(_ int, t *T) { - f(t) - }) -} - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. -func ForEachIdx[T any](input []T, f func(int, *T)) { Iterator[T]{}.ForEachIdx(input, f) } - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. 
-func (iter Iterator[T]) ForEachIdx(input []T, f func(int, *T)) { - if iter.MaxGoroutines == 0 { - // iter is a value receiver and is hence safe to mutate - iter.MaxGoroutines = defaultMaxGoroutines() - } - - numInput := len(input) - if iter.MaxGoroutines > numInput { - // No more concurrent tasks than the number of input items. - iter.MaxGoroutines = numInput - } - - var idx atomic.Int64 - // Create the task outside the loop to avoid extra closure allocations. - task := func() { - i := int(idx.Add(1) - 1) - for ; i < numInput; i = int(idx.Add(1) - 1) { - f(i, &input[i]) - } - } - - var wg conc.WaitGroup - for i := 0; i < iter.MaxGoroutines; i++ { - wg.Go(task) - } - wg.Wait() -} diff --git a/vendor/github.com/sourcegraph/conc/iter/map.go b/vendor/github.com/sourcegraph/conc/iter/map.go deleted file mode 100644 index efbe6bfaf..000000000 --- a/vendor/github.com/sourcegraph/conc/iter/map.go +++ /dev/null @@ -1,65 +0,0 @@ -package iter - -import ( - "sync" - - "github.com/sourcegraph/conc/internal/multierror" -) - -// Mapper is an Iterator with a result type R. It can be used to configure -// the behaviour of Map and MapErr. The zero value is safe to use with -// reasonable defaults. -// -// Mapper is also safe for reuse and concurrent use. -type Mapper[T, R any] Iterator[T] - -// Map applies f to each element of input, returning the mapped result. -// -// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable -// goroutine limit, use a custom Mapper. -func Map[T, R any](input []T, f func(*T) R) []R { - return Mapper[T, R]{}.Map(input, f) -} - -// Map applies f to each element of input, returning the mapped result. -// -// Map uses up to the configured Mapper's maximum number of goroutines. -func (m Mapper[T, R]) Map(input []T, f func(*T) R) []R { - res := make([]R, len(input)) - Iterator[T](m).ForEachIdx(input, func(i int, t *T) { - res[i] = f(t) - }) - return res -} - -// MapErr applies f to each element of the input, returning the mapped result -// and a combined error of all returned errors. -// -// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable -// goroutine limit, use a custom Mapper. -func MapErr[T, R any](input []T, f func(*T) (R, error)) ([]R, error) { - return Mapper[T, R]{}.MapErr(input, f) -} - -// MapErr applies f to each element of the input, returning the mapped result -// and a combined error of all returned errors. -// -// Map uses up to the configured Mapper's maximum number of goroutines. -func (m Mapper[T, R]) MapErr(input []T, f func(*T) (R, error)) ([]R, error) { - var ( - res = make([]R, len(input)) - errMux sync.Mutex - errs error - ) - Iterator[T](m).ForEachIdx(input, func(i int, t *T) { - var err error - res[i], err = f(t) - if err != nil { - errMux.Lock() - // TODO: use stdlib errors once multierrors land in go 1.20 - errs = multierror.Join(errs, err) - errMux.Unlock() - } - }) - return res, errs -} diff --git a/vendor/github.com/sourcegraph/conc/pool/context_pool.go b/vendor/github.com/sourcegraph/conc/pool/context_pool.go new file mode 100644 index 000000000..85c34e5ae --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/context_pool.go @@ -0,0 +1,104 @@ +package pool + +import ( + "context" +) + +// ContextPool is a pool that runs tasks that take a context. +// A new ContextPool should be created with `New().WithContext(ctx)`. 
+// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +type ContextPool struct { + errorPool ErrorPool + + ctx context.Context + cancel context.CancelFunc + + cancelOnError bool +} + +// Go submits a task. If it returns an error, the error will be +// collected and returned by Wait(). If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ContextPool) Go(f func(ctx context.Context) error) { + p.errorPool.Go(func() error { + if p.cancelOnError { + // If we are cancelling on error, then we also want to cancel if a + // panic is raised. To do this, we need to recover, cancel, and then + // re-throw the caught panic. + defer func() { + if r := recover(); r != nil { + p.cancel() + panic(r) + } + }() + } + + err := f(p.ctx) + if err != nil && p.cancelOnError { + // Leaky abstraction warning: We add the error directly because + // otherwise, canceling could cause another goroutine to exit and + // return an error before this error was added, which breaks the + // expectations of WithFirstError(). + p.errorPool.addErr(err) + p.cancel() + return nil + } + return err + }) +} + +// Wait cleans up all spawned goroutines, propagates any panics, and +// returns an error if any of the tasks errored. +func (p *ContextPool) Wait() error { + // Make sure we call cancel after pool is done to avoid memory leakage. + defer p.cancel() + return p.errorPool.Wait() +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. +// This is particularly useful for (*ContextPool).WithCancelOnError(), +// where all errors after the first are likely to be context.Canceled. +func (p *ContextPool) WithFirstError() *ContextPool { + p.panicIfInitialized() + p.errorPool.WithFirstError() + return p +} + +// WithCancelOnError configures the pool to cancel its context as soon as +// any task returns an error or panics. By default, the pool's context is not +// canceled until the parent context is canceled. +// +// In this case, all errors returned from the pool after the first will +// likely be context.Canceled - you may want to also use +// (*ContextPool).WithFirstError() to configure the pool to only return +// the first error. +func (p *ContextPool) WithCancelOnError() *ContextPool { + p.panicIfInitialized() + p.cancelOnError = true + return p +} + +// WithFailFast is an alias for the combination of WithFirstError and +// WithCancelOnError. By default, the errors from all tasks are returned and +// the pool's context is not canceled until the parent context is canceled. +func (p *ContextPool) WithFailFast() *ContextPool { + p.panicIfInitialized() + p.WithFirstError() + p.WithCancelOnError() + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. 
+func (p *ContextPool) WithMaxGoroutines(n int) *ContextPool { + p.panicIfInitialized() + p.errorPool.WithMaxGoroutines(n) + return p +} + +func (p *ContextPool) panicIfInitialized() { + p.errorPool.panicIfInitialized() +} diff --git a/vendor/github.com/sourcegraph/conc/pool/error_pool.go b/vendor/github.com/sourcegraph/conc/pool/error_pool.go new file mode 100644 index 000000000..e1789e61b --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/error_pool.go @@ -0,0 +1,100 @@ +package pool + +import ( + "context" + "errors" + "sync" +) + +// ErrorPool is a pool that runs tasks that may return an error. +// Errors are collected and returned by Wait(). +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +// +// A new ErrorPool should be created using `New().WithErrors()`. +type ErrorPool struct { + pool Pool + + onlyFirstError bool + + mu sync.Mutex + errs []error +} + +// Go submits a task to the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ErrorPool) Go(f func() error) { + p.pool.Go(func() { + p.addErr(f()) + }) +} + +// Wait cleans up any spawned goroutines, propagating any panics and +// returning any errors from tasks. +func (p *ErrorPool) Wait() error { + p.pool.Wait() + + errs := p.errs + p.errs = nil // reset errs + + if len(errs) == 0 { + return nil + } else if p.onlyFirstError { + return errs[0] + } else { + return errors.Join(errs...) + } +} + +// WithContext converts the pool to a ContextPool for tasks that should +// run under the same context, such that they each respect shared cancellation. +// For example, WithCancelOnError can be configured on the returned pool to +// signal that all goroutines should be cancelled upon the first error. +func (p *ErrorPool) WithContext(ctx context.Context) *ContextPool { + p.panicIfInitialized() + ctx, cancel := context.WithCancel(ctx) + return &ContextPool{ + errorPool: p.deref(), + ctx: ctx, + cancel: cancel, + } +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. +func (p *ErrorPool) WithFirstError() *ErrorPool { + p.panicIfInitialized() + p.onlyFirstError = true + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ErrorPool) WithMaxGoroutines(n int) *ErrorPool { + p.panicIfInitialized() + p.pool.WithMaxGoroutines(n) + return p +} + +// deref is a helper that creates a shallow copy of the pool with the same +// settings. We don't want to just dereference the pointer because that makes +// the copylock lint angry. +func (p *ErrorPool) deref() ErrorPool { + return ErrorPool{ + pool: p.pool.deref(), + onlyFirstError: p.onlyFirstError, + } +} + +func (p *ErrorPool) panicIfInitialized() { + p.pool.panicIfInitialized() +} + +func (p *ErrorPool) addErr(err error) { + if err != nil { + p.mu.Lock() + p.errs = append(p.errs, err) + p.mu.Unlock() + } +} diff --git a/vendor/github.com/sourcegraph/conc/pool/pool.go b/vendor/github.com/sourcegraph/conc/pool/pool.go new file mode 100644 index 000000000..8f4494efb --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/pool.go @@ -0,0 +1,174 @@ +package pool + +import ( + "context" + "sync" + + "github.com/sourcegraph/conc" +) + +// New creates a new Pool. 
+func New() *Pool { + return &Pool{} +} + +// Pool is a pool of goroutines used to execute tasks concurrently. +// +// Tasks are submitted with Go(). Once all your tasks have been submitted, you +// must call Wait() to clean up any spawned goroutines and propagate any +// panics. +// +// Goroutines are started lazily, so creating a new pool is cheap. There will +// never be more goroutines spawned than there are tasks submitted. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +// +// Pool is efficient, but not zero cost. It should not be used for very short +// tasks. Startup and teardown come with an overhead of around 1µs, and each +// task has an overhead of around 300ns. +type Pool struct { + handle conc.WaitGroup + limiter limiter + tasks chan func() + initOnce sync.Once +} + +// Go submits a task to be run in the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *Pool) Go(f func()) { + p.init() + + if p.limiter == nil { + // No limit on the number of goroutines. + select { + case p.tasks <- f: + // A goroutine was available to handle the task. + default: + // No goroutine was available to handle the task. + // Spawn a new one and send it the task. + p.handle.Go(func() { + p.worker(f) + }) + } + } else { + select { + case p.limiter <- struct{}{}: + // If we are below our limit, spawn a new worker rather + // than waiting for one to become available. + p.handle.Go(func() { + p.worker(f) + }) + case p.tasks <- f: + // A worker is available and has accepted the task. + return + } + } + +} + +// Wait cleans up spawned goroutines, propagating any panics that were +// raised by a task. +func (p *Pool) Wait() { + p.init() + + close(p.tasks) + + // After Wait() returns, reset the struct so tasks will be reinitialized on + // next use. This better matches the behavior of sync.WaitGroup. + defer func() { p.initOnce = sync.Once{} }() + + p.handle.Wait() +} + +// MaxGoroutines returns the maximum size of the pool. +func (p *Pool) MaxGoroutines() int { + return p.limiter.limit() +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *Pool) WithMaxGoroutines(n int) *Pool { + p.panicIfInitialized() + if n < 1 { + panic("max goroutines in a pool must be greater than zero") + } + p.limiter = make(limiter, n) + return p +} + +// init ensures that the pool is initialized before use. This makes the +// zero value of the pool usable. +func (p *Pool) init() { + p.initOnce.Do(func() { + p.tasks = make(chan func()) + }) +} + +// panicIfInitialized will trigger a panic if a configuration method is called +// after the pool has started any goroutines for the first time. In the case that +// new settings are needed, a new pool should be created. +func (p *Pool) panicIfInitialized() { + if p.tasks != nil { + panic("pool can not be reconfigured after calling Go() for the first time") + } +} + +// WithErrors converts the pool to an ErrorPool so the submitted tasks can +// return errors. +func (p *Pool) WithErrors() *ErrorPool { + p.panicIfInitialized() + return &ErrorPool{ + pool: p.deref(), + } +} + +// deref is a helper that creates a shallow copy of the pool with the same +// settings. We don't want to just dereference the pointer because that makes +// the copylock lint angry. 
+func (p *Pool) deref() Pool { + p.panicIfInitialized() + return Pool{ + limiter: p.limiter, + } +} + +// WithContext converts the pool to a ContextPool for tasks that should +// run under the same context, such that they each respect shared cancellation. +// For example, WithCancelOnError can be configured on the returned pool to +// signal that all goroutines should be cancelled upon the first error. +func (p *Pool) WithContext(ctx context.Context) *ContextPool { + p.panicIfInitialized() + ctx, cancel := context.WithCancel(ctx) + return &ContextPool{ + errorPool: p.WithErrors().deref(), + ctx: ctx, + cancel: cancel, + } +} + +func (p *Pool) worker(initialFunc func()) { + // The only time this matters is if the task panics. + // This makes it possible to spin up new workers in that case. + defer p.limiter.release() + + if initialFunc != nil { + initialFunc() + } + + for f := range p.tasks { + f() + } +} + +type limiter chan struct{} + +func (l limiter) limit() int { + return cap(l) +} + +func (l limiter) release() { + if l != nil { + <-l + } +} diff --git a/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go b/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go new file mode 100644 index 000000000..6bc30dd63 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go @@ -0,0 +1,85 @@ +package pool + +import ( + "context" +) + +// ResultContextPool is a pool that runs tasks that take a context and return a +// result. The context passed to the task will be canceled if any of the tasks +// return an error, which makes its functionality different than just capturing +// a context with the task closure. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +type ResultContextPool[T any] struct { + contextPool ContextPool + agg resultAggregator[T] + collectErrored bool +} + +// Go submits a task to the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ResultContextPool[T]) Go(f func(context.Context) (T, error)) { + idx := p.agg.nextIndex() + p.contextPool.Go(func(ctx context.Context) error { + res, err := f(ctx) + p.agg.save(idx, res, err != nil) + return err + }) +} + +// Wait cleans up all spawned goroutines, propagates any panics, and +// returns an error if any of the tasks errored. +func (p *ResultContextPool[T]) Wait() ([]T, error) { + err := p.contextPool.Wait() + results := p.agg.collect(p.collectErrored) + p.agg = resultAggregator[T]{} + return results, err +} + +// WithCollectErrored configures the pool to still collect the result of a task +// even if the task returned an error. By default, the result of tasks that errored +// are ignored and only the error is collected. +func (p *ResultContextPool[T]) WithCollectErrored() *ResultContextPool[T] { + p.panicIfInitialized() + p.collectErrored = true + return p +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. +func (p *ResultContextPool[T]) WithFirstError() *ResultContextPool[T] { + p.panicIfInitialized() + p.contextPool.WithFirstError() + return p +} + +// WithCancelOnError configures the pool to cancel its context as soon as +// any task returns an error. By default, the pool's context is not +// canceled until the parent context is canceled. 
+func (p *ResultContextPool[T]) WithCancelOnError() *ResultContextPool[T] { + p.panicIfInitialized() + p.contextPool.WithCancelOnError() + return p +} + +// WithFailFast is an alias for the combination of WithFirstError and +// WithCancelOnError. By default, the errors from all tasks are returned and +// the pool's context is not canceled until the parent context is canceled. +func (p *ResultContextPool[T]) WithFailFast() *ResultContextPool[T] { + p.panicIfInitialized() + p.contextPool.WithFailFast() + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ResultContextPool[T]) WithMaxGoroutines(n int) *ResultContextPool[T] { + p.panicIfInitialized() + p.contextPool.WithMaxGoroutines(n) + return p +} + +func (p *ResultContextPool[T]) panicIfInitialized() { + p.contextPool.panicIfInitialized() +} diff --git a/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go b/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go new file mode 100644 index 000000000..832cd9bb4 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go @@ -0,0 +1,80 @@ +package pool + +import ( + "context" +) + +// ResultErrorPool is a pool that executes tasks that return a generic result +// type and an error. Tasks are executed in the pool with Go(), then the +// results of the tasks are returned by Wait(). +// +// The order of the results is guaranteed to be the same as the order the +// tasks were submitted. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +type ResultErrorPool[T any] struct { + errorPool ErrorPool + agg resultAggregator[T] + collectErrored bool +} + +// Go submits a task to the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ResultErrorPool[T]) Go(f func() (T, error)) { + idx := p.agg.nextIndex() + p.errorPool.Go(func() error { + res, err := f() + p.agg.save(idx, res, err != nil) + return err + }) +} + +// Wait cleans up any spawned goroutines, propagating any panics and +// returning the results and any errors from tasks. +func (p *ResultErrorPool[T]) Wait() ([]T, error) { + err := p.errorPool.Wait() + results := p.agg.collect(p.collectErrored) + p.agg = resultAggregator[T]{} // reset for reuse + return results, err +} + +// WithCollectErrored configures the pool to still collect the result of a task +// even if the task returned an error. By default, the result of tasks that errored +// are ignored and only the error is collected. +func (p *ResultErrorPool[T]) WithCollectErrored() *ResultErrorPool[T] { + p.panicIfInitialized() + p.collectErrored = true + return p +} + +// WithContext converts the pool to a ResultContextPool for tasks that should +// run under the same context, such that they each respect shared cancellation. +// For example, WithCancelOnError can be configured on the returned pool to +// signal that all goroutines should be cancelled upon the first error. +func (p *ResultErrorPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] { + p.panicIfInitialized() + return &ResultContextPool[T]{ + contextPool: *p.errorPool.WithContext(ctx), + } +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. 
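+// Errors after the first are discarded rather than combined.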
+func (p *ResultErrorPool[T]) WithFirstError() *ResultErrorPool[T] { + p.panicIfInitialized() + p.errorPool.WithFirstError() + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ResultErrorPool[T]) WithMaxGoroutines(n int) *ResultErrorPool[T] { + p.panicIfInitialized() + p.errorPool.WithMaxGoroutines(n) + return p +} + +func (p *ResultErrorPool[T]) panicIfInitialized() { + p.errorPool.panicIfInitialized() +} diff --git a/vendor/github.com/sourcegraph/conc/pool/result_pool.go b/vendor/github.com/sourcegraph/conc/pool/result_pool.go new file mode 100644 index 000000000..f73a77261 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/result_pool.go @@ -0,0 +1,142 @@ +package pool + +import ( + "context" + "sort" + "sync" +) + +// NewWithResults creates a new ResultPool for tasks with a result of type T. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +func NewWithResults[T any]() *ResultPool[T] { + return &ResultPool[T]{ + pool: *New(), + } +} + +// ResultPool is a pool that executes tasks that return a generic result type. +// Tasks are executed in the pool with Go(), then the results of the tasks are +// returned by Wait(). +// +// The order of the results is guaranteed to be the same as the order the +// tasks were submitted. +type ResultPool[T any] struct { + pool Pool + agg resultAggregator[T] +} + +// Go submits a task to the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ResultPool[T]) Go(f func() T) { + idx := p.agg.nextIndex() + p.pool.Go(func() { + p.agg.save(idx, f(), false) + }) +} + +// Wait cleans up all spawned goroutines, propagating any panics, and returning +// a slice of results from tasks that did not panic. +func (p *ResultPool[T]) Wait() []T { + p.pool.Wait() + results := p.agg.collect(true) + p.agg = resultAggregator[T]{} // reset for reuse + return results +} + +// MaxGoroutines returns the maximum size of the pool. +func (p *ResultPool[T]) MaxGoroutines() int { + return p.pool.MaxGoroutines() +} + +// WithErrors converts the pool to an ResultErrorPool so the submitted tasks +// can return errors. +func (p *ResultPool[T]) WithErrors() *ResultErrorPool[T] { + p.panicIfInitialized() + return &ResultErrorPool[T]{ + errorPool: *p.pool.WithErrors(), + } +} + +// WithContext converts the pool to a ResultContextPool for tasks that should +// run under the same context, such that they each respect shared cancellation. +// For example, WithCancelOnError can be configured on the returned pool to +// signal that all goroutines should be cancelled upon the first error. +func (p *ResultPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] { + p.panicIfInitialized() + return &ResultContextPool[T]{ + contextPool: *p.pool.WithContext(ctx), + } +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ResultPool[T]) WithMaxGoroutines(n int) *ResultPool[T] { + p.panicIfInitialized() + p.pool.WithMaxGoroutines(n) + return p +} + +func (p *ResultPool[T]) panicIfInitialized() { + p.pool.panicIfInitialized() +} + +// resultAggregator is a utility type that lets us safely append from multiple +// goroutines. The zero value is valid and ready to use. 
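+//
+// Each task reserves a slot with nextIndex() before it is submitted, so the
+// collected results keep submission order even when tasks finish out of order.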
+type resultAggregator[T any] struct {
+	mu      sync.Mutex
+	len     int
+	results []T
+	errored []int
+}
+
+// nextIndex reserves a slot for a result. The returned value should be passed
+// to save() when adding a result to the aggregator.
+func (r *resultAggregator[T]) nextIndex() int {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	nextIdx := r.len
+	r.len += 1
+	return nextIdx
+}
+
+func (r *resultAggregator[T]) save(i int, res T, errored bool) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if i >= len(r.results) {
+		old := r.results
+		r.results = make([]T, r.len)
+		copy(r.results, old)
+	}
+
+	r.results[i] = res
+
+	if errored {
+		r.errored = append(r.errored, i)
+	}
+}
+
+// collect returns the set of aggregated results.
+func (r *resultAggregator[T]) collect(collectErrored bool) []T {
+	if !r.mu.TryLock() {
+		panic("collect should not be called until all goroutines have exited")
+	}
+
+	if collectErrored || len(r.errored) == 0 {
+		return r.results
+	}
+
+	filtered := r.results[:0]
+	sort.Ints(r.errored)
+	for i, e := range r.errored {
+		if i == 0 {
+			filtered = append(filtered, r.results[:e]...)
+		} else {
+			filtered = append(filtered, r.results[r.errored[i-1]+1:e]...)
+		}
+	}
+	// Keep the results that follow the last errored index; without this,
+	// every result saved after the final error would be silently dropped.
+	filtered = append(filtered, r.results[r.errored[len(r.errored)-1]+1:]...)
+	return filtered
+}
diff --git a/vendor/github.com/spf13/afero/.editorconfig b/vendor/github.com/spf13/afero/.editorconfig
index 4492e9f9f..a85749f19 100644
--- a/vendor/github.com/spf13/afero/.editorconfig
+++ b/vendor/github.com/spf13/afero/.editorconfig
@@ -10,3 +10,6 @@ trim_trailing_whitespace = true
 
 [*.go]
 indent_style = tab
+
+[{*.yml,*.yaml}]
+indent_size = 2
diff --git a/vendor/github.com/spf13/afero/.golangci.yaml b/vendor/github.com/spf13/afero/.golangci.yaml
index 806289a25..4f359b81a 100644
--- a/vendor/github.com/spf13/afero/.golangci.yaml
+++ b/vendor/github.com/spf13/afero/.golangci.yaml
@@ -1,18 +1,48 @@
-linters-settings:
-  gci:
-    sections:
-      - standard
-      - default
-      - prefix(github.com/spf13/afero)
+version: "2"
+
+run:
+  timeout: 10m
 
 linters:
-  disable-all: true
-  enable:
-    - gci
-    - gofmt
-    - gofumpt
-    - staticcheck
-
-issues:
-  exclude-dirs:
-    - gcsfs/internal/stiface
+  enable:
+    - govet
+    - ineffassign
+    - misspell
+    - nolintlint
+    # - revive
+    - staticcheck
+    - unused
+
+  disable:
+    - errcheck
+    # - staticcheck
+
+  settings:
+    misspell:
+      locale: US
+    nolintlint:
+      allow-unused: false # report any unused nolint directives
+      require-specific: false # don't require nolint directives to be specific about which linter is being skipped
+
+  exclusions:
+    paths:
+      - gcsfs/internal/stiface
+
+formatters:
+  enable:
+    - gci
+    - gofmt
+    - gofumpt
+    - goimports
+    - golines
+
+  settings:
+    gci:
+      sections:
+        - standard
+        - default
+        - localmodule
+
+  exclusions:
+    paths:
+      - gcsfs/internal/stiface
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
index 619af574f..ef67e9a77 100644
--- a/vendor/github.com/spf13/afero/README.md
+++ b/vendor/github.com/spf13/afero/README.md
@@ -1,442 +1,474 @@
-![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
+afero logo-sm
 
-A FileSystem Abstraction System for Go
-[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) 
[![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI)
+[![GoDoc](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero)](https://goreportcard.com/report/github.com/spf13/afero)
+![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square)
 
-# Overview
 
-Afero is a filesystem framework providing a simple, uniform and universal API
-interacting with any filesystem, as an abstraction layer providing interfaces,
-types and methods. Afero has an exceptionally clean interface and simple design
-without needless constructors or initialization methods.
+# Afero: The Universal Filesystem Abstraction for Go
 
-Afero is also a library providing a base set of interoperable backend
-filesystems that make it easy to work with, while retaining all the power
-and benefit of the os and ioutil packages.
+Afero is a powerful and extensible filesystem abstraction system for Go. It provides a single, unified API for interacting with diverse filesystems—including the local disk, memory, archives, and network storage.
 
-Afero provides significant improvements over using the os package alone, most
-notably the ability to create mock and testing filesystems without relying on the disk.
+Afero acts as a drop-in replacement for the standard `os` package, enabling you to write modular code that is agnostic to the underlying storage, dramatically simplifying testing, and allowing for sophisticated architectural patterns through filesystem composition.
 
-It is suitable for use in any situation where you would consider using the OS
-package as it provides an additional abstraction that makes it easy to use a
-memory backed file system during testing. It also adds support for the http
-filesystem for full interoperability.
+## Why Afero?
 
+Afero elevates filesystem interaction beyond simple file reading and writing, offering solutions for testability, flexibility, and advanced architecture.
 
-## Afero Features
+🔑 **Key Features:**
 
-* A single consistent API for accessing a variety of filesystems
-* Interoperation between a variety of file system types
-* A set of interfaces to encourage and enforce interoperability between backends
-* An atomic cross platform memory backed file system
-* Support for compositional (union) file systems by combining multiple file systems acting as one
-* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
-* A set of utility functions ported from io, ioutil & hugo to be afero aware
-* Wrapper for go 1.16 filesystem abstraction `io/fs.FS`
+* **Universal API:** Write your code once. Run it against the local OS, in-memory storage, ZIP/TAR archives, or remote systems (SFTP, GCS).
+* **Ultimate Testability:** Utilize `MemMapFs`, a fully concurrent-safe, read/write in-memory filesystem. 
Write fast, isolated, and reliable unit tests without touching the physical disk or worrying about cleanup. +* **Powerful Composition:** Afero's hidden superpower. Layer filesystems on top of each other to create sophisticated behaviors: + * **Sandboxing:** Use `CopyOnWriteFs` to create temporary scratch spaces that isolate changes from the base filesystem. + * **Caching:** Use `CacheOnReadFs` to automatically layer a fast cache (like memory) over a slow backend (like a network drive). + * **Security Jails:** Use `BasePathFs` to restrict application access to a specific subdirectory (chroot). +* **`os` Package Compatibility:** Afero mirrors the functions in the standard `os` package, making adoption and refactoring seamless. +* **`io/fs` Compatibility:** Fully compatible with the Go standard library's `io/fs` interfaces. -# Using Afero +## Installation -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Wrapper for the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero +```bash +go get github.com/spf13/afero +``` -Next include Afero in your application. ```go import "github.com/spf13/afero" ``` -## Step 2: Declare a backend +## Quick Start: The Power of Abstraction + +The core of Afero is the `afero.Fs` interface. By designing your functions to accept this interface rather than calling `os.*` functions directly, your code instantly becomes more flexible and testable. + +### 1. Refactor Your Code + +Change functions that rely on the `os` package to accept `afero.Fs`. -First define a package variable and set it to a pointer to a filesystem. ```go -var AppFs = afero.NewMemMapFs() +// Before: Coupled to the OS and difficult to test +// func ProcessConfiguration(path string) error { +// data, err := os.ReadFile(path) +// ... +// } -or +import "github.com/spf13/afero" -var AppFs = afero.NewOsFs() +// After: Decoupled, flexible, and testable +func ProcessConfiguration(fs afero.Fs, path string) error { + // Use Afero utility functions which mirror os/ioutil + data, err := afero.ReadFile(fs, path) + // ... process the data + return err +} ``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. -## Step 3: Use it like you would the OS package +### 2. Usage in Production -Throughout your application use any function and method like you normally -would. +In your production environment, inject the `OsFs` backend, which wraps the standard operating system calls. -So if my application before had: ```go -os.Open("/tmp/foo") -``` -We would replace it with: -```go -AppFs.Open("/tmp/foo") +func main() { + // Use the real OS filesystem + AppFs := afero.NewOsFs() + ProcessConfiguration(AppFs, "/etc/myapp.conf") +} ``` -`AppFs` being the variable we defined above. - +### 3. Usage in Testing -## List of all available functions +In your tests, inject `MemMapFs`. This provides a blazing-fast, isolated, in-memory filesystem that requires no disk I/O and no cleanup. 
-File System Methods Available: ```go -Chmod(name string, mode os.FileMode) : error -Chown(name string, uid, gid int) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error +func TestProcessConfiguration(t *testing.T) { + // Use the in-memory filesystem + AppFs := afero.NewMemMapFs() + + // Pre-populate the memory filesystem for the test + configPath := "/test/config.json" + afero.WriteFile(AppFs, configPath, []byte(`{"feature": true}`), 0644) + + // Run the test entirely in memory + err := ProcessConfiguration(AppFs, configPath) + if err != nil { + t.Fatal(err) + } +} ``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. -## Using Afero's utility functions +## Afero's Superpower: Composition -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil with some developed for Hugo. +Afero's most unique feature is its ability to combine filesystems. This allows you to build complex behaviors out of simple components, keeping your application logic clean. -The afero utilities support all afero compatible backends. +### Example 1: Sandboxing with Copy-on-Write -The list of utilities includes: +Create a temporary environment where an application can "modify" system files without affecting the actual disk. ```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) +// 1. The base layer is the real OS, made read-only for safety. +baseFs := afero.NewReadOnlyFs(afero.NewOsFs()) -They are available under two different approaches to use. You can either call -them directly where the first parameter of each function will be the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. +// 2. The overlay layer is a temporary in-memory filesystem for changes. +overlayFs := afero.NewMemMapFs() -### Calling utilities directly +// 3. Combine them. Reads fall through to the base; writes only hit the overlay. 
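+// (Note: files that exist only in the read-only base cannot be removed or
+// renamed through the union; see the CopyOnWriteFs docs.)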
+sandboxFs := afero.NewCopyOnWriteFs(baseFs, overlayFs) -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs,"", "ioutil-test") +// The application can now "modify" /etc/hosts, but the changes are isolated in memory. +afero.WriteFile(sandboxFs, "/etc/hosts", []byte("127.0.0.1 sandboxed-app"), 0644) +// The real /etc/hosts on disk is untouched. ``` -### Calling via Afero +### Example 2: Caching a Slow Filesystem -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` +Improve performance by layering a fast cache (like memory) over a slow backend (like a network drive or cloud storage). -## Using Afero for Testing +```go +import "time" -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and can be easily -reproducible regardless of OS. You could create files to your heart’s content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. +// Assume 'remoteFs' is a slow backend (e.g., SFTP or GCS) +var remoteFs afero.Fs -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far more easier to do -* No test cleanup needed +// 'cacheFs' is a fast in-memory backend +cacheFs := afero.NewMemMapFs() -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs() during testing you -can set it to afero.NewMemMapFs(). +// Create the caching layer. Cache items for 5 minutes upon first read. +cachedFs := afero.NewCacheOnReadFs(remoteFs, cacheFs, 5*time.Minute) -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that Tests are order -independent, with no test relying on the state left by an earlier test. +// The first read is slow (fetches from remote, then caches) +data1, _ := afero.ReadFile(cachedFs, "data.json") -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} +// The second read is instant (serves from memory cache) +data2, _ := afero.ReadFile(cachedFs, "data.json") ``` -# Available Backends +### Example 3: Security Jails (chroot) -## Operating System Native +Restrict an application component's access to a specific subdirectory. -### OsFs +```go +osFs := afero.NewOsFs() -The first is simply a wrapper around the native OS calls. This makes it -very easy to use as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed. +// Create a filesystem rooted at /home/user/public +// The application cannot access anything above this directory. 
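+// (Paths are cleaned before lookup, so "../" segments cannot escape the base.)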
+jailedFs := afero.NewBasePathFs(osFs, "/home/user/public") -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755) +// To the application, this is reading "/" +// In reality, it's reading "/home/user/public/" +dirInfo, err := afero.ReadDir(jailedFs, "/") + +// Attempts to access parent directories fail +_, err = jailedFs.Open("../secrets.txt") // Returns an error ``` -## Memory Backed Storage +## Real-World Use Cases -### MemMapFs +### Build Cloud-Agnostic Applications -Afero also provides a fully atomic memory backed filesystem perfect for use in -mocking and to speed up unnecessary disk io when persistence isn’t -necessary. It is fully concurrent and will work within go routines -safely. +Write applications that seamlessly work with different storage backends: ```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755) -``` +type DocumentProcessor struct { + fs afero.Fs +} -#### InMemoryFile +func NewDocumentProcessor(fs afero.Fs) *DocumentProcessor { + return &DocumentProcessor{fs: fs} +} -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. +func (p *DocumentProcessor) Process(inputPath, outputPath string) error { + // This code works whether fs is local disk, cloud storage, or memory + content, err := afero.ReadFile(p.fs, inputPath) + if err != nil { + return err + } + + processed := processContent(content) + return afero.WriteFile(p.fs, outputPath, processed, 0644) +} -## Network Interfaces +// Use with local filesystem +processor := NewDocumentProcessor(afero.NewOsFs()) -### SftpFs +// Use with Google Cloud Storage +processor := NewDocumentProcessor(gcsFS) -Afero has experimental support for secure file transfer protocol (sftp). Which can -be used to perform file operations over a encrypted channel. +// Use with in-memory filesystem for testing +processor := NewDocumentProcessor(afero.NewMemMapFs()) +``` -### GCSFs +### Treating Archives as Filesystems -Afero has experimental support for Google Cloud Storage (GCS). You can either set the -`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in -`NewGcsFS` to configure access to your GCS bucket. +Read files directly from `.zip` or `.tar` archives without unpacking them to disk first. -Some known limitations of the existing implementation: -* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version. -* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that's is left for another version. -* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent. +```go +import ( + "archive/zip" + "github.com/spf13/afero/zipfs" +) + +// Assume 'zipReader' is a *zip.Reader initialized from a file or memory +var zipReader *zip.Reader +// Create a read-only ZipFs +archiveFS := zipfs.New(zipReader) -## Filtering Backends +// Read a file from within the archive using the standard Afero API +content, err := afero.ReadFile(archiveFS, "/docs/readme.md") +``` -### BasePathFs +### Serving Any Filesystem over HTTP -The BasePathFs restricts all operations to a given path within an Fs. 
-The given file name to the operations on this Fs will be prepended with -the base path before calling the source Fs. +Use `HttpFs` to expose any Afero filesystem—even one created dynamically in memory—through a standard Go web server. ```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` +import ( + "net/http" + "github.com/spf13/afero" +) -### ReadOnlyFs +func main() { + memFS := afero.NewMemMapFs() + afero.WriteFile(memFS, "index.html", []byte("
<h1>Hello from Memory!</h1>
"), 0644) -A thin wrapper around the source Fs providing a read only view. + // Wrap the memory filesystem to make it compatible with http.FileServer. + httpFS := afero.NewHttpFs(memFS) -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM + http.Handle("/", http.FileServer(httpFS.Dir("/"))) + http.ListenAndServe(":8080", nil) +} ``` -# RegexpFs +### Testing Made Simple -A filtered view on file names, any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. +One of Afero's greatest strengths is making filesystem-dependent code easily testable: ```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` +func SaveUserData(fs afero.Fs, userID string, data []byte) error { + filename := fmt.Sprintf("users/%s.json", userID) + return afero.WriteFile(fs, filename, data, 0644) +} -### HttpFs +func TestSaveUserData(t *testing.T) { + // Create a clean, fast, in-memory filesystem for testing + testFS := afero.NewMemMapFs() + + userData := []byte(`{"name": "John", "email": "john@example.com"}`) + err := SaveUserData(testFS, "123", userData) + + if err != nil { + t.Fatalf("SaveUserData failed: %v", err) + } + + // Verify the file was saved correctly + saved, err := afero.ReadFile(testFS, "users/123.json") + if err != nil { + t.Fatalf("Failed to read saved file: %v", err) + } + + if string(saved) != string(userData) { + t.Errorf("Data mismatch: got %s, want %s", saved, userData) + } +} +``` -Afero provides an http compatible backend which can wrap any of the existing -backends. +**Benefits of testing with Afero:** +- ⚡ **Fast** - No disk I/O, tests run in memory +- 🔄 **Reliable** - Each test starts with a clean slate +- 🧹 **No cleanup** - Memory is automatically freed +- 🔒 **Safe** - Can't accidentally modify real files +- 🏃 **Parallel** - Tests can run concurrently without conflicts + +## Backend Reference + +| Type | Backend | Constructor | Description | Status | +| :--- | :--- | :--- | :--- | :--- | +| **Core** | **OsFs** | `afero.NewOsFs()` | Interacts with the real operating system filesystem. Use in production. | ✅ Official | +| | **MemMapFs** | `afero.NewMemMapFs()` | A fast, atomic, concurrent-safe, in-memory filesystem. Ideal for testing. | ✅ Official | +| **Composition** | **CopyOnWriteFs**| `afero.NewCopyOnWriteFs(base, overlay)` | A read-only base with a writable overlay. Ideal for sandboxing. | ✅ Official | +| | **CacheOnReadFs**| `afero.NewCacheOnReadFs(base, cache, ttl)` | Lazily caches files from a slow base into a fast layer on first read. | ✅ Official | +| | **BasePathFs** | `afero.NewBasePathFs(source, path)` | Restricts operations to a subdirectory (chroot/jail). | ✅ Official | +| | **ReadOnlyFs** | `afero.NewReadOnlyFs(source)` | Provides a read-only view, preventing any modifications. | ✅ Official | +| | **RegexpFs** | `afero.NewRegexpFs(source, regexp)` | Filters a filesystem, only showing files that match a regex. | ✅ Official | +| **Utility** | **HttpFs** | `afero.NewHttpFs(source)` | Wraps any Afero filesystem to be served via `http.FileServer`. | ✅ Official | +| **Archives** | **ZipFs** | `zipfs.New(zipReader)` | Read-only access to files within a ZIP archive. | ✅ Official | +| | **TarFs** | `tarfs.New(tarReader)` | Read-only access to files within a TAR archive. 
| ✅ Official | +| **Network** | **GcsFs** | `gcsfs.NewGcsFs(...)` | Google Cloud Storage backend. | ⚡ Experimental | +| | **SftpFs** | `sftpfs.New(...)` | SFTP backend. | ⚡ Experimental | +| **3rd Party Cloud** | **S3Fs** | [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3) | Production-ready S3 backend built on official AWS SDK. | 🔹 3rd Party | +| | **MinioFs** | [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio) | MinIO object storage backend with S3 compatibility. | 🔹 3rd Party | +| | **DriveFs** | [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive) | Google Drive backend with streaming support. | 🔹 3rd Party | +| | **DropboxFs** | [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox) | Dropbox backend with streaming support. | 🔹 3rd Party | +| **3rd Party Specialized** | **GitFs** | [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs) | Git repository filesystem (read-only, Afero compatible). | 🔹 3rd Party | +| | **DockerFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Docker container filesystem access. | 🔹 3rd Party | +| | **GitHubFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | GitHub repository and releases filesystem. | 🔹 3rd Party | +| | **FilterFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Filesystem filtering with predicates. | 🔹 3rd Party | +| | **IgnoreFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | .gitignore-aware filtering filesystem. | 🔹 3rd Party | +| | **FUSEFs** | [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem) | Generic FUSE implementation using any Afero backend. | 🔹 3rd Party | + +## Afero vs. `io/fs` (Go 1.16+) + +Go 1.16 introduced the `io/fs` package, which provides a standard abstraction for **read-only** filesystems. + +Afero complements `io/fs` by focusing on different needs: + +* **Use `io/fs` when:** You only need to read files and want to conform strictly to the standard library interfaces. +* **Use Afero when:** + * Your application needs to **create, write, modify, or delete** files. + * You need to test complex read/write interactions (e.g., renaming, concurrent writes). + * You need advanced compositional features (Copy-on-Write, Caching, etc.). + +Afero is fully compatible with `io/fs`. You can wrap any Afero filesystem to satisfy the `fs.FS` interface using `afero.NewIOFS`: -The Http package requires a slightly specific version of Open which -returns an http.File type. +```go +import "io/fs" -Afero provides an httpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an httpFs. +// Create an Afero filesystem (writable) +var myAferoFs afero.Fs = afero.NewMemMapFs() -```go -httpFs := afero.NewHttpFs() -fileserver := http.FileServer(httpFs.Dir()) -http.Handle("/", fileserver) +// Convert it to a standard library fs.FS (read-only view) +var myIoFs fs.FS = afero.NewIOFS(myAferoFs) ``` -## Composite Backends +## Third-Party Backends & Ecosystem -Afero provides the ability have two filesystems (or more) act as a single -file system. +The Afero community has developed numerous backends and tools that extend the library's capabilities. Below are curated, well-maintained options organized by maturity and reliability. 
-### CacheOnReadFs +### Featured Community Backends -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. Subsequent reads will be pulled from the overlay -directly permitting the request is within the cache duration of when it was -created in the overlay. +These are mature, reliable backends that we can confidently recommend for production use: -If the base filesystem is writeable, any changes to files will be -done first to the base, then to the overlay layer. Write calls to open file -handles like `Write()` or `Truncate()` to the overlay first. +#### **Amazon S3** - [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3) +Production-ready S3 backend built on the official AWS SDK for Go. -To writing files to the overlay only, you can use the overlay Fs directly (not -via the union Fs). +```go +import "github.com/fclairamb/afero-s3" -Cache files in the layer for the given time.Duration, a cache duration of 0 -means "forever" meaning the file will not be re-requested from the base ever. +s3fs := s3.NewFs(bucket, session) +``` -A read-only base will make the overlay also read-only but still copy files -from the base to the overlay when they're not present (or outdated) in the -caching layer. +#### **MinIO** - [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio) +MinIO object storage backend providing S3-compatible object storage with deduplication and optimization features. ```go -base := afero.NewOsFs() -layer := afero.NewMemMapFs() -ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) +import "github.com/cpyun/afero-minio" + +minioFs := miniofs.NewMinioFs(ctx, "minio://endpoint/bucket") ``` -### CopyOnWriteFs() +### Community & Specialized Backends -The CopyOnWriteFs is a read only base file system with a potentially -writeable layer on top. +#### Cloud Storage -Read operations will first look in the overlay and if not found there, will -serve the file from the base. +- **Google Drive** - [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive) + Streaming support; no write-seeking or POSIX permissions; no files listing cache -Changes to the file system will only be made in the overlay. +- **Dropbox** - [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox) + Streaming support; no write-seeking or POSIX permissions -Any attempt to modify a file found only in the base will copy the file to the -overlay layer before modification (including opening a file with a writable -handle). +#### Version Control Systems -Removing and Renaming files present only in the base layer is not currently -permitted. If a file is present in the base layer and the overlay, only the -overlay will be removed/renamed. +- **Git Repositories** - [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs) + Read-only filesystem abstraction for Git repositories. Works with bare repositories and provides filesystem view of any git reference. Uses go-git for repository access. 
-```go - base := afero.NewOsFs() - roBase := afero.NewReadOnlyFs(base) - ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) +#### Container and Remote Systems - fh, _ = ufs.Create("/home/test/file2.txt") - fh.WriteString("This is a test") - fh.Close() -``` +- **Docker Containers** - [`unmango/aferox`](https://github.com/unmango/aferox) + Access Docker container filesystems as if they were local filesystems -In this example all write operations will only occur in memory (MemMapFs) -leaving the base filesystem (OsFs) untouched. +- **GitHub API** - [`unmango/aferox`](https://github.com/unmango/aferox) + Turn GitHub repositories, releases, and assets into browsable filesystems +#### FUSE Integration -## Desired/possible backends +- **Generic FUSE** - [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem) + Mount any Afero filesystem as a FUSE filesystem, allowing any Afero backend to be used as a real mounted filesystem -The following is a short list of possible backends we hope someone will -implement: +#### Specialized Filesystems -* SSH -* S3 +- **FAT32 Support** - [`aligator/GoFAT`](https://github.com/aligator/GoFAT) + Pure Go FAT filesystem implementation (currently read-only) -# About the project +### Interface Adapters & Utilities -## What's in the name +**Cross-Interface Compatibility:** +- [`jfontan/go-billy-desfacer`](https://github.com/jfontan/go-billy-desfacer) - Adapter between Afero and go-billy interfaces (for go-git compatibility) +- [`Maldris/go-billy-afero`](https://github.com/Maldris/go-billy-afero) - Alternative wrapper for using Afero with go-billy +- [`c4milo/afero2billy`](https://github.com/c4milo/afero2billy) - Another Afero to billy filesystem adapter -Afero comes from the latin roots Ad-Facere. +**Working Directory Management:** +- [`carolynvs/aferox`](https://github.com/carolynvs/aferox) - Working directory-aware filesystem wrapper -**"Ad"** is a prefix meaning "to". +**Advanced Filtering:** +- [`unmango/aferox`](https://github.com/unmango/aferox) includes multiple specialized filesystems: + - **FilterFs** - Predicate-based file filtering + - **IgnoreFs** - .gitignore-aware filtering + - **WriterFs** - Dump writes to io.Writer for debugging -**"Facere"** is a form of the root "faciō" making "make or do". +#### Developer Tools & Utilities -The literal meaning of afero is "to make" or "to do" which seems very fitting -for a library that allows one to make files and directories and do things with them. +**nhatthm Utility Suite** - Essential tools for Afero development: +- [`nhatthm/aferocopy`](https://github.com/nhatthm/aferocopy) - Copy files between any Afero filesystems +- [`nhatthm/aferomock`](https://github.com/nhatthm/aferomock) - Mocking toolkit for testing +- [`nhatthm/aferoassert`](https://github.com/nhatthm/aferoassert) - Assertion helpers for filesystem testing -The English word that shares the same roots as Afero is "affair". Affair shares -the same concept but as a noun it means "something that is made or done" or "an -object of a particular type". +### Ecosystem Showcase -It's also nice that unlike some of my other libraries (hugo, cobra, viper) it -Googles very well. 
+**Windows Virtual Drives** - [`balazsgrill/potatodrive`](https://github.com/balazsgrill/potatodrive)
+Mount any Afero filesystem as a Windows drive letter. Brilliant demonstration of Afero's power!
 
-## Release Notes
+### Modern Asset Embedding (Go 1.16+)
 
-See the [Releases Page](https://github.com/spf13/afero/releases).
+Instead of third-party tools, use Go's native `//go:embed` with Afero:
+
+```go
+import (
+	"embed"
+	"github.com/spf13/afero"
+)
+
+//go:embed assets/*
+var assetsFS embed.FS
+
+func main() {
+	// Convert embedded files to an Afero filesystem.
+	// FromIOFS is a struct type, so it is constructed with a composite literal.
+	fs := afero.FromIOFS{FS: assetsFS}
+
+	// Use like any other Afero filesystem
+	content, _ := afero.ReadFile(fs, "assets/config.json")
+}
+```
 
 ## Contributing
 
-1. Fork it
+We welcome contributions! The project is mature, but we are actively looking for contributors to help implement and stabilize network/cloud backends.
+
+* 🔥 **Microsoft Azure Blob Storage**
+* 🔒 **Modern Encryption Backend** - Built on secure, contemporary crypto (not legacy EncFS)
+* 🐙 **Canonical go-git Adapter** - Unified solution for Git integration
+* 📡 **SSH/SCP Backend** - Secure remote file operations
+* Stabilization of existing experimental backends (GCS, SFTP)
+
+To contribute:
+1. Fork the repository
 2. Create your feature branch (`git checkout -b my-new-feature`)
 3. Commit your changes (`git commit -am 'Add some feature'`)
 4. Push to the branch (`git push origin my-new-feature`)
-5. Create new Pull Request
+5. Create a new Pull Request
+
+## 📄 License
 
-## Contributors
+Afero is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) for details.
 
-Names in no particular order:
+## 🔗 Additional Resources
 
-* [spf13](https://github.com/spf13)
-* [jaqx0r](https://github.com/jaqx0r)
-* [mbertschler](https://github.com/mbertschler)
-* [xor-gate](https://github.com/xor-gate)
+- [📖 Full API Documentation](https://pkg.go.dev/github.com/spf13/afero)
+- [🎯 Examples Repository](https://github.com/spf13/afero/tree/master/examples)
+- [📋 Release Notes](https://github.com/spf13/afero/releases)
+- [❓ GitHub Discussions](https://github.com/spf13/afero/discussions)
+
+---
+
+*Afero comes from the Latin roots Ad-Facere, meaning "to make" or "to do" - fitting for a library that empowers you to make and do amazing things with filesystems.*
 
-## License
 
-Afero is released under the Apache 2.0 license. 
See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) +*Afero comes from the Latin roots Ad-Facere, meaning "to make" or "to do" - fitting for a library that empowers you to make and do amazing things with filesystems.* diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go index 184d6dd70..aba2879eb 100644 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -34,7 +34,8 @@ func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { _, err := u.base.Stat(name) if err != nil { if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || + oerr.Err == syscall.ENOTDIR { return false, nil } } @@ -237,7 +238,11 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, return u.layer.OpenFile(name, flag, perm) } - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + return nil, &os.PathError{ + Op: "open", + Path: name, + Err: syscall.ENOTDIR, + } // ...or os.ErrNotExist? } if b { return u.base.OpenFile(name, flag, perm) diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go index b13155ca4..57ba5673e 100644 --- a/vendor/github.com/spf13/afero/iofs.go +++ b/vendor/github.com/spf13/afero/iofs.go @@ -137,7 +137,7 @@ type readDirFile struct { var _ fs.ReadDirFile = readDirFile{} func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { - items, err := r.File.Readdir(n) + items, err := r.Readdir(n) if err != nil { return nil, err } @@ -161,7 +161,12 @@ var _ Fs = FromIOFS{} func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } -func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } +func (f FromIOFS) Mkdir( + name string, + perm os.FileMode, +) error { + return notImplemented("mkdir", name) +} func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { return notImplemented("mkdirall", path) diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go index 89c1bfc0a..2dcbdb1f0 100644 --- a/vendor/github.com/spf13/afero/lstater.go +++ b/vendor/github.com/spf13/afero/lstater.go @@ -19,9 +19,9 @@ import ( // Lstater is an optional interface in Afero. It is only implemented by the // filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. +// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem. // Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. +// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not. 
type Lstater interface { LstatIfPossible(name string) (os.FileInfo, bool, error) } diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 62fe4498e..c77fcd40e 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -150,7 +150,11 @@ func (f *File) Sync() error { func (f *File) Readdir(count int) (res []os.FileInfo, err error) { if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + return nil, &os.PathError{ + Op: "readdir", + Path: f.fileData.name, + Err: errors.New("not a dir"), + } } var outLength int64 @@ -236,7 +240,11 @@ func (f *File) Truncate(size int64) error { return ErrFileClosed } if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + return &os.PathError{ + Op: "truncate", + Path: f.fileData.name, + Err: errors.New("file handle is read only"), + } } if size < 0 { return ErrOutOfRange @@ -273,7 +281,11 @@ func (f *File) Write(b []byte) (n int, err error) { return 0, ErrFileClosed } if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + return 0, &os.PathError{ + Op: "write", + Path: f.fileData.name, + Err: errors.New("file handle is read only"), + } } n = len(b) cur := atomic.LoadInt64(&f.at) @@ -285,7 +297,9 @@ func (f *File) Write(b []byte) (n int, err error) { tail = f.fileData.data[n+int(cur):] } if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) + f.fileData.data = append( + f.fileData.data, + append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) f.fileData.data = append(f.fileData.data, tail...) } else { f.fileData.data = append(f.fileData.data[:cur], b...) diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go index 62dd6c93c..2e2253f55 100644 --- a/vendor/github.com/spf13/afero/unionFile.go +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -92,7 +92,8 @@ func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { func (f *UnionFile) Write(s []byte) (n int, err error) { if f.Layer != nil { n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? + if err == nil && + f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? _, err = f.Base.Write(s) } return n, err @@ -157,7 +158,7 @@ var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, err // return a single view of the overlayed directories. // At the end of the directory view, the error is io.EOF if c > 0. 
func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger + merge := f.Merger if merge == nil { merge = defaultUnionMergeDirsFn } diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go index 9e4cba274..231768838 100644 --- a/vendor/github.com/spf13/afero/util.go +++ b/vendor/github.com/spf13/afero/util.go @@ -113,11 +113,11 @@ func GetTempDir(fs Fs, subPath string) string { if subPath != "" { // preserve windows backslash :-( if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) + subPath = strings.ReplaceAll(subPath, "\\", "____") } dir = dir + UnicodeSanitize((subPath)) if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) + dir = strings.ReplaceAll(dir, "____", "\\") } if exists, _ := Exists(fs, dir); exists { diff --git a/vendor/github.com/spf13/cast/.editorconfig b/vendor/github.com/spf13/cast/.editorconfig new file mode 100644 index 000000000..a85749f19 --- /dev/null +++ b/vendor/github.com/spf13/cast/.editorconfig @@ -0,0 +1,15 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{*.yml,*.yaml}] +indent_size = 2 diff --git a/vendor/github.com/spf13/cast/.golangci.yaml b/vendor/github.com/spf13/cast/.golangci.yaml new file mode 100644 index 000000000..e00fd47aa --- /dev/null +++ b/vendor/github.com/spf13/cast/.golangci.yaml @@ -0,0 +1,39 @@ +version: "2" + +run: + timeout: 10m + +linters: + enable: + - errcheck + - govet + - ineffassign + - misspell + - nolintlint + # - revive + - unused + + disable: + - staticcheck + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +formatters: + enable: + - gci + - gofmt + # - gofumpt + - goimports + # - golines + + settings: + gci: + sections: + - standard + - default + - localmodule diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md index 1be666a45..c58eccb3f 100644 --- a/vendor/github.com/spf13/cast/README.md +++ b/vendor/github.com/spf13/cast/README.md @@ -1,9 +1,9 @@ # cast -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/test.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/test.yaml) -[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml) +[![go.dev 
reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/spf13/cast) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/spf13/cast?style=flat-square&color=61CFDD) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/spf13/cast/badge?style=flat-square)](https://deps.dev/go/github.com%252Fspf13%252Fcast) Easy and safe casting from one type to another in Go @@ -73,3 +73,7 @@ the code for a complete set. var eight interface{} = 8 cast.ToInt(eight) // 8 cast.ToInt(nil) // 0 + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/spf13/cast/alias.go b/vendor/github.com/spf13/cast/alias.go new file mode 100644 index 000000000..855d60005 --- /dev/null +++ b/vendor/github.com/spf13/cast/alias.go @@ -0,0 +1,69 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. +package cast + +import ( + "reflect" + "slices" +) + +var kindNames = []string{ + reflect.String: "string", + reflect.Bool: "bool", + reflect.Int: "int", + reflect.Int8: "int8", + reflect.Int16: "int16", + reflect.Int32: "int32", + reflect.Int64: "int64", + reflect.Uint: "uint", + reflect.Uint8: "uint8", + reflect.Uint16: "uint16", + reflect.Uint32: "uint32", + reflect.Uint64: "uint64", + reflect.Float32: "float32", + reflect.Float64: "float64", +} + +var kinds = map[reflect.Kind]func(reflect.Value) any{ + reflect.String: func(v reflect.Value) any { return v.String() }, + reflect.Bool: func(v reflect.Value) any { return v.Bool() }, + reflect.Int: func(v reflect.Value) any { return int(v.Int()) }, + reflect.Int8: func(v reflect.Value) any { return int8(v.Int()) }, + reflect.Int16: func(v reflect.Value) any { return int16(v.Int()) }, + reflect.Int32: func(v reflect.Value) any { return int32(v.Int()) }, + reflect.Int64: func(v reflect.Value) any { return v.Int() }, + reflect.Uint: func(v reflect.Value) any { return uint(v.Uint()) }, + reflect.Uint8: func(v reflect.Value) any { return uint8(v.Uint()) }, + reflect.Uint16: func(v reflect.Value) any { return uint16(v.Uint()) }, + reflect.Uint32: func(v reflect.Value) any { return uint32(v.Uint()) }, + reflect.Uint64: func(v reflect.Value) any { return v.Uint() }, + reflect.Float32: func(v reflect.Value) any { return float32(v.Float()) }, + reflect.Float64: func(v reflect.Value) any { return v.Float() }, +} + +// resolveAlias attempts to resolve a named type to its underlying basic type (if possible). +// +// Pointers are expected to be indirected by this point. +func resolveAlias(i any) (any, bool) { + if i == nil { + return nil, false + } + + t := reflect.TypeOf(i) + + // Not a named type + if t.Name() == "" || slices.Contains(kindNames, t.Name()) { + return i, false + } + + resolve, ok := kinds[t.Kind()] + if !ok { // Not a supported kind + return i, false + } + + v := reflect.ValueOf(i) + + return resolve(v), true +} diff --git a/vendor/github.com/spf13/cast/basic.go b/vendor/github.com/spf13/cast/basic.go new file mode 100644 index 000000000..fa330e207 --- /dev/null +++ b/vendor/github.com/spf13/cast/basic.go @@ -0,0 +1,131 @@ +// Copyright © 2014 Steve Francia . 
+// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "fmt" + "html/template" + "strconv" + "time" +) + +// ToBoolE casts any value to a bool type. +func ToBoolE(i any) (bool, error) { + i, _ = indirect(i) + + switch b := i.(type) { + case bool: + return b, nil + case nil: + return false, nil + case int: + return b != 0, nil + case int8: + return b != 0, nil + case int16: + return b != 0, nil + case int32: + return b != 0, nil + case int64: + return b != 0, nil + case uint: + return b != 0, nil + case uint8: + return b != 0, nil + case uint16: + return b != 0, nil + case uint32: + return b != 0, nil + case uint64: + return b != 0, nil + case float32: + return b != 0, nil + case float64: + return b != 0, nil + case time.Duration: + return b != 0, nil + case string: + return strconv.ParseBool(b) + case json.Number: + v, err := ToInt64E(b) + if err == nil { + return v != 0, nil + } + + return false, fmt.Errorf(errorMsg, i, i, false) + default: + if i, ok := resolveAlias(i); ok { + return ToBoolE(i) + } + + return false, fmt.Errorf(errorMsg, i, i, false) + } +} + +// ToStringE casts any value to a string type. +func ToStringE(i any) (string, error) { + switch s := i.(type) { + case string: + return s, nil + case bool: + return strconv.FormatBool(s), nil + case float64: + return strconv.FormatFloat(s, 'f', -1, 64), nil + case float32: + return strconv.FormatFloat(float64(s), 'f', -1, 32), nil + case int: + return strconv.Itoa(s), nil + case int8: + return strconv.FormatInt(int64(s), 10), nil + case int16: + return strconv.FormatInt(int64(s), 10), nil + case int32: + return strconv.FormatInt(int64(s), 10), nil + case int64: + return strconv.FormatInt(s, 10), nil + case uint: + return strconv.FormatUint(uint64(s), 10), nil + case uint8: + return strconv.FormatUint(uint64(s), 10), nil + case uint16: + return strconv.FormatUint(uint64(s), 10), nil + case uint32: + return strconv.FormatUint(uint64(s), 10), nil + case uint64: + return strconv.FormatUint(s, 10), nil + case json.Number: + return s.String(), nil + case []byte: + return string(s), nil + case template.HTML: + return string(s), nil + case template.URL: + return string(s), nil + case template.JS: + return string(s), nil + case template.CSS: + return string(s), nil + case template.HTMLAttr: + return string(s), nil + case nil: + return "", nil + case fmt.Stringer: + return s.String(), nil + case error: + return s.Error(), nil + default: + if i, ok := indirect(i); ok { + return ToStringE(i) + } + + if i, ok := resolveAlias(i); ok { + return ToStringE(i) + } + + return "", fmt.Errorf(errorMsg, i, i, "") + } +} diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go index 0cfe9418d..8d85539b3 100644 --- a/vendor/github.com/spf13/cast/cast.go +++ b/vendor/github.com/spf13/cast/cast.go @@ -8,169 +8,77 @@ package cast import "time" -// ToBool casts an interface to a bool type. -func ToBool(i interface{}) bool { - v, _ := ToBoolE(i) - return v -} - -// ToTime casts an interface to a time.Time type. -func ToTime(i interface{}) time.Time { - v, _ := ToTimeE(i) - return v -} - -func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { - v, _ := ToTimeInDefaultLocationE(i, location) - return v -} - -// ToDuration casts an interface to a time.Duration type. 
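The new basic.go keeps the old type switches but adds two fallbacks: pointer indirection via indirect and named-type resolution via resolveAlias. A minimal sketch of the call-side effect, assuming cast v1.10.0 as vendored here (Flag is a hypothetical named type introduced only for illustration):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

// Flag is a hypothetical named type; resolveAlias reduces it to its
// underlying bool kind before the cast is retried.
type Flag bool

func main() {
	b, err := cast.ToBoolE(Flag(true))
	fmt.Println(b, err) // true <nil>

	// Pointers are dereferenced first, so *int casts like int.
	n := 42
	s, _ := cast.ToStringE(&n)
	fmt.Println(s) // 42
}
```
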
-func ToDuration(i interface{}) time.Duration { - v, _ := ToDurationE(i) - return v -} - -// ToFloat64 casts an interface to a float64 type. -func ToFloat64(i interface{}) float64 { - v, _ := ToFloat64E(i) - return v -} - -// ToFloat32 casts an interface to a float32 type. -func ToFloat32(i interface{}) float32 { - v, _ := ToFloat32E(i) - return v -} - -// ToInt64 casts an interface to an int64 type. -func ToInt64(i interface{}) int64 { - v, _ := ToInt64E(i) - return v -} - -// ToInt32 casts an interface to an int32 type. -func ToInt32(i interface{}) int32 { - v, _ := ToInt32E(i) - return v -} - -// ToInt16 casts an interface to an int16 type. -func ToInt16(i interface{}) int16 { - v, _ := ToInt16E(i) - return v -} - -// ToInt8 casts an interface to an int8 type. -func ToInt8(i interface{}) int8 { - v, _ := ToInt8E(i) - return v -} - -// ToInt casts an interface to an int type. -func ToInt(i interface{}) int { - v, _ := ToIntE(i) - return v -} - -// ToUint casts an interface to a uint type. -func ToUint(i interface{}) uint { - v, _ := ToUintE(i) - return v -} - -// ToUint64 casts an interface to a uint64 type. -func ToUint64(i interface{}) uint64 { - v, _ := ToUint64E(i) - return v -} - -// ToUint32 casts an interface to a uint32 type. -func ToUint32(i interface{}) uint32 { - v, _ := ToUint32E(i) - return v -} - -// ToUint16 casts an interface to a uint16 type. -func ToUint16(i interface{}) uint16 { - v, _ := ToUint16E(i) - return v -} - -// ToUint8 casts an interface to a uint8 type. -func ToUint8(i interface{}) uint8 { - v, _ := ToUint8E(i) - return v -} - -// ToString casts an interface to a string type. -func ToString(i interface{}) string { - v, _ := ToStringE(i) - return v -} - -// ToStringMapString casts an interface to a map[string]string type. -func ToStringMapString(i interface{}) map[string]string { - v, _ := ToStringMapStringE(i) - return v -} - -// ToStringMapStringSlice casts an interface to a map[string][]string type. -func ToStringMapStringSlice(i interface{}) map[string][]string { - v, _ := ToStringMapStringSliceE(i) - return v -} - -// ToStringMapBool casts an interface to a map[string]bool type. -func ToStringMapBool(i interface{}) map[string]bool { - v, _ := ToStringMapBoolE(i) - return v -} - -// ToStringMapInt casts an interface to a map[string]int type. -func ToStringMapInt(i interface{}) map[string]int { - v, _ := ToStringMapIntE(i) - return v -} - -// ToStringMapInt64 casts an interface to a map[string]int64 type. -func ToStringMapInt64(i interface{}) map[string]int64 { - v, _ := ToStringMapInt64E(i) - return v -} - -// ToStringMap casts an interface to a map[string]interface{} type. -func ToStringMap(i interface{}) map[string]interface{} { - v, _ := ToStringMapE(i) - return v -} - -// ToSlice casts an interface to a []interface{} type. -func ToSlice(i interface{}) []interface{} { - v, _ := ToSliceE(i) - return v -} - -// ToBoolSlice casts an interface to a []bool type. -func ToBoolSlice(i interface{}) []bool { - v, _ := ToBoolSliceE(i) - return v -} - -// ToStringSlice casts an interface to a []string type. -func ToStringSlice(i interface{}) []string { - v, _ := ToStringSliceE(i) - return v -} +const errorMsg = "unable to cast %#v of type %T to %T" +const errorMsgWith = "unable to cast %#v of type %T to %T: %w" -// ToIntSlice casts an interface to a []int type. -func ToIntSlice(i interface{}) []int { - v, _ := ToIntSliceE(i) - return v -} +// Basic is a type parameter constraint for functions accepting basic types. 
+// +// It represents the supported basic types this package can cast to. +type Basic interface { + string | bool | Number | time.Time | time.Duration +} + +// ToE casts any value to a [Basic] type. +func ToE[T Basic](i any) (T, error) { + var t T + + var v any + var err error + + switch any(t).(type) { + case string: + v, err = ToStringE(i) + case bool: + v, err = ToBoolE(i) + case int: + v, err = toNumberE[int](i, parseInt[int]) + case int8: + v, err = toNumberE[int8](i, parseInt[int8]) + case int16: + v, err = toNumberE[int16](i, parseInt[int16]) + case int32: + v, err = toNumberE[int32](i, parseInt[int32]) + case int64: + v, err = toNumberE[int64](i, parseInt[int64]) + case uint: + v, err = toUnsignedNumberE[uint](i, parseUint[uint]) + case uint8: + v, err = toUnsignedNumberE[uint8](i, parseUint[uint8]) + case uint16: + v, err = toUnsignedNumberE[uint16](i, parseUint[uint16]) + case uint32: + v, err = toUnsignedNumberE[uint32](i, parseUint[uint32]) + case uint64: + v, err = toUnsignedNumberE[uint64](i, parseUint[uint64]) + case float32: + v, err = toNumberE[float32](i, parseFloat[float32]) + case float64: + v, err = toNumberE[float64](i, parseFloat[float64]) + case time.Time: + v, err = ToTimeE(i) + case time.Duration: + v, err = ToDurationE(i) + } + + if err != nil { + return t, err + } + + return v.(T), nil +} + +// Must is a helper that wraps a call to a cast function and panics if the error is non-nil. +func Must[T any](i any, err error) T { + if err != nil { + panic(err) + } + + return i.(T) +} + +// To casts any value to a [Basic] type. +func To[T Basic](i any) T { + v, _ := ToE[T](i) -// ToDurationSlice casts an interface to a []time.Duration type. -func ToDurationSlice(i interface{}) []time.Duration { - v, _ := ToDurationSliceE(i) return v } diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go deleted file mode 100644 index 4181a2e75..000000000 --- a/vendor/github.com/spf13/cast/caste.go +++ /dev/null @@ -1,1510 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package cast - -import ( - "encoding/json" - "errors" - "fmt" - "html/template" - "reflect" - "strconv" - "strings" - "time" -) - -var errNegativeNotAllowed = errors.New("unable to cast negative value") - -type float64EProvider interface { - Float64() (float64, error) -} - -type float64Provider interface { - Float64() float64 -} - -// ToTimeE casts an interface to a time.Time type. -func ToTimeE(i interface{}) (tim time.Time, err error) { - return ToTimeInDefaultLocationE(i, time.UTC) -} - -// ToTimeInDefaultLocationE casts an empty interface to time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
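The rewritten cast.go replaces the long list of per-type wrappers with three generic entry points: ToE for the error-returning form, To for the error-swallowing form, and Must for the panicking form. A short usage sketch, assuming the vendored v1.10.0 API:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// ToE is the generic replacement for ToIntE, ToStringE, etc.
	n, err := cast.ToE[int]("42")
	fmt.Println(n, err) // 42 <nil>

	// To drops the error, mirroring the old ToInt/ToString helpers.
	fmt.Println(cast.To[string](3.14)) // 3.14

	// Must panics instead of returning an error.
	d := cast.Must[time.Duration](cast.ToE[time.Duration]("1500ms"))
	fmt.Println(d) // 1.5s
}
```

The un-suffixed helpers (ToString, ToBool, ToInt, and friends) are still referenced by map.go further down, so they appear to survive in files not shown in this hunk.
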
-func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { - i = indirect(i) - - switch v := i.(type) { - case time.Time: - return v, nil - case string: - return StringToDateInDefaultLocation(v, location) - case json.Number: - s, err1 := ToInt64E(v) - if err1 != nil { - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } - return time.Unix(s, 0), nil - case int: - return time.Unix(int64(v), 0), nil - case int64: - return time.Unix(v, 0), nil - case int32: - return time.Unix(int64(v), 0), nil - case uint: - return time.Unix(int64(v), 0), nil - case uint64: - return time.Unix(int64(v), 0), nil - case uint32: - return time.Unix(int64(v), 0), nil - default: - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } -} - -// ToDurationE casts an interface to a time.Duration type. -func ToDurationE(i interface{}) (d time.Duration, err error) { - i = indirect(i) - - switch s := i.(type) { - case time.Duration: - return s, nil - case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: - d = time.Duration(ToInt64(s)) - return - case float32, float64: - d = time.Duration(ToFloat64(s)) - return - case string: - if strings.ContainsAny(s, "nsuµmh") { - d, err = time.ParseDuration(s) - } else { - d, err = time.ParseDuration(s + "ns") - } - return - case float64EProvider: - var v float64 - v, err = s.Float64() - d = time.Duration(v) - return - case float64Provider: - d = time.Duration(s.Float64()) - return - default: - err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) - return - } -} - -// ToBoolE casts an interface to a bool type. -func ToBoolE(i interface{}) (bool, error) { - i = indirect(i) - - switch b := i.(type) { - case bool: - return b, nil - case nil: - return false, nil - case int: - return b != 0, nil - case int64: - return b != 0, nil - case int32: - return b != 0, nil - case int16: - return b != 0, nil - case int8: - return b != 0, nil - case uint: - return b != 0, nil - case uint64: - return b != 0, nil - case uint32: - return b != 0, nil - case uint16: - return b != 0, nil - case uint8: - return b != 0, nil - case float64: - return b != 0, nil - case float32: - return b != 0, nil - case time.Duration: - return b != 0, nil - case string: - return strconv.ParseBool(i.(string)) - case json.Number: - v, err := ToInt64E(b) - if err == nil { - return v != 0, nil - } - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - default: - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - } -} - -// ToFloat64E casts an interface to a float64 type. 
-func ToFloat64E(i interface{}) (float64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float64(intv), nil - } - - switch s := i.(type) { - case float64: - return s, nil - case float32: - return float64(s), nil - case int64: - return float64(s), nil - case int32: - return float64(s), nil - case int16: - return float64(s), nil - case int8: - return float64(s), nil - case uint: - return float64(s), nil - case uint64: - return float64(s), nil - case uint32: - return float64(s), nil - case uint16: - return float64(s), nil - case uint8: - return float64(s), nil - case string: - v, err := strconv.ParseFloat(s, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64Provider: - return s.Float64(), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - } -} - -// ToFloat32E casts an interface to a float32 type. -func ToFloat32E(i interface{}) (float32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float32(intv), nil - } - - switch s := i.(type) { - case float64: - return float32(s), nil - case float32: - return s, nil - case int64: - return float32(s), nil - case int32: - return float32(s), nil - case int16: - return float32(s), nil - case int8: - return float32(s), nil - case uint: - return float32(s), nil - case uint64: - return float32(s), nil - case uint32: - return float32(s), nil - case uint16: - return float32(s), nil - case uint8: - return float32(s), nil - case string: - v, err := strconv.ParseFloat(s, 32) - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64Provider: - return float32(s.Float64()), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - } -} - -// ToInt64E casts an interface to an int64 type. -func ToInt64E(i interface{}) (int64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int64(intv), nil - } - - switch s := i.(type) { - case int64: - return s, nil - case int32: - return int64(s), nil - case int16: - return int64(s), nil - case int8: - return int64(s), nil - case uint: - return int64(s), nil - case uint64: - return int64(s), nil - case uint32: - return int64(s), nil - case uint16: - return int64(s), nil - case uint8: - return int64(s), nil - case float64: - return int64(s), nil - case float32: - return int64(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToInt64E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - } -} - -// ToInt32E casts an interface to an int32 type. 
-func ToInt32E(i interface{}) (int32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int32(intv), nil - } - - switch s := i.(type) { - case int64: - return int32(s), nil - case int32: - return s, nil - case int16: - return int32(s), nil - case int8: - return int32(s), nil - case uint: - return int32(s), nil - case uint64: - return int32(s), nil - case uint32: - return int32(s), nil - case uint16: - return int32(s), nil - case uint8: - return int32(s), nil - case float64: - return int32(s), nil - case float32: - return int32(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - case json.Number: - return ToInt32E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - } -} - -// ToInt16E casts an interface to an int16 type. -func ToInt16E(i interface{}) (int16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int16(intv), nil - } - - switch s := i.(type) { - case int64: - return int16(s), nil - case int32: - return int16(s), nil - case int16: - return s, nil - case int8: - return int16(s), nil - case uint: - return int16(s), nil - case uint64: - return int16(s), nil - case uint32: - return int16(s), nil - case uint16: - return int16(s), nil - case uint8: - return int16(s), nil - case float64: - return int16(s), nil - case float32: - return int16(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - case json.Number: - return ToInt16E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - } -} - -// ToInt8E casts an interface to an int8 type. -func ToInt8E(i interface{}) (int8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int8(intv), nil - } - - switch s := i.(type) { - case int64: - return int8(s), nil - case int32: - return int8(s), nil - case int16: - return int8(s), nil - case int8: - return s, nil - case uint: - return int8(s), nil - case uint64: - return int8(s), nil - case uint32: - return int8(s), nil - case uint16: - return int8(s), nil - case uint8: - return int8(s), nil - case float64: - return int8(s), nil - case float32: - return int8(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - case json.Number: - return ToInt8E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - } -} - -// ToIntE casts an interface to an int type. 
-func ToIntE(i interface{}) (int, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return intv, nil - } - - switch s := i.(type) { - case int64: - return int(s), nil - case int32: - return int(s), nil - case int16: - return int(s), nil - case int8: - return int(s), nil - case uint: - return int(s), nil - case uint64: - return int(s), nil - case uint32: - return int(s), nil - case uint16: - return int(s), nil - case uint8: - return int(s), nil - case float64: - return int(s), nil - case float32: - return int(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToIntE(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - } -} - -// ToUintE casts an interface to a uint type. -func ToUintE(i interface{}) (uint, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - case json.Number: - return ToUintE(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case uint: - return s, nil - case uint64: - return uint(s), nil - case uint32: - return uint(s), nil - case uint16: - return uint(s), nil - case uint8: - return uint(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - } -} - -// ToUint64E casts an interface to a uint64 type. 
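Both the removed switch-based implementations and the generic toUnsignedNumberE that replaces them (further down, in number.go) keep the errNegativeNotAllowed contract: a negative input to an unsigned target is an error, never a two's-complement wrap. A quick sketch of the expected behavior:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Negative inputs to unsigned targets are rejected outright.
	_, err := cast.ToUintE(-1)
	fmt.Println(err) // unable to cast negative value

	// Non-negative values convert as before.
	u, _ := cast.ToUint64E("18")
	fmt.Println(u) // 18
}
```
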
-func ToUint64E(i interface{}) (uint64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint64(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - case json.Number: - return ToUint64E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case uint: - return uint64(s), nil - case uint64: - return s, nil - case uint32: - return uint64(s), nil - case uint16: - return uint64(s), nil - case uint8: - return uint64(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - } -} - -// ToUint32E casts an interface to a uint32 type. -func ToUint32E(i interface{}) (uint32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint32(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - case json.Number: - return ToUint32E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case uint: - return uint32(s), nil - case uint64: - return uint32(s), nil - case uint32: - return s, nil - case uint16: - return uint32(s), nil - case uint8: - return uint32(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - } -} - -// ToUint16E casts an interface to a uint16 type. 
-func ToUint16E(i interface{}) (uint16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint16(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - case json.Number: - return ToUint16E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case uint: - return uint16(s), nil - case uint64: - return uint16(s), nil - case uint32: - return uint16(s), nil - case uint16: - return s, nil - case uint8: - return uint16(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - } -} - -// ToUint8E casts an interface to a uint type. -func ToUint8E(i interface{}) (uint8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint8(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - case json.Number: - return ToUint8E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case uint: - return uint8(s), nil - case uint64: - return uint8(s), nil - case uint32: - return uint8(s), nil - case uint16: - return uint8(s), nil - case uint8: - return s, nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - } -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirect returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil). -func indirect(a interface{}) interface{} { - if a == nil { - return nil - } - if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { - // Avoid creating a reflect.Value if it's not a pointer. - return a - } - v := reflect.ValueOf(a) - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. 
-// indirectToStringerOrError returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -// or error, -func indirectToStringerOrError(a interface{}) interface{} { - if a == nil { - return nil - } - - errorType := reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - - v := reflect.ValueOf(a) - for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// ToStringE casts an interface to a string type. -func ToStringE(i interface{}) (string, error) { - i = indirectToStringerOrError(i) - - switch s := i.(type) { - case string: - return s, nil - case bool: - return strconv.FormatBool(s), nil - case float64: - return strconv.FormatFloat(s, 'f', -1, 64), nil - case float32: - return strconv.FormatFloat(float64(s), 'f', -1, 32), nil - case int: - return strconv.Itoa(s), nil - case int64: - return strconv.FormatInt(s, 10), nil - case int32: - return strconv.Itoa(int(s)), nil - case int16: - return strconv.FormatInt(int64(s), 10), nil - case int8: - return strconv.FormatInt(int64(s), 10), nil - case uint: - return strconv.FormatUint(uint64(s), 10), nil - case uint64: - return strconv.FormatUint(uint64(s), 10), nil - case uint32: - return strconv.FormatUint(uint64(s), 10), nil - case uint16: - return strconv.FormatUint(uint64(s), 10), nil - case uint8: - return strconv.FormatUint(uint64(s), 10), nil - case json.Number: - return s.String(), nil - case []byte: - return string(s), nil - case template.HTML: - return string(s), nil - case template.URL: - return string(s), nil - case template.JS: - return string(s), nil - case template.CSS: - return string(s), nil - case template.HTMLAttr: - return string(s), nil - case nil: - return "", nil - case fmt.Stringer: - return s.String(), nil - case error: - return s.Error(), nil - default: - return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) - } -} - -// ToStringMapStringE casts an interface to a map[string]string type. -func ToStringMapStringE(i interface{}) (map[string]string, error) { - m := map[string]string{} - - switch v := i.(type) { - case map[string]string: - return v, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) - } -} - -// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
-func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - m := map[string][]string{} - - switch v := i.(type) { - case map[string][]string: - return v, nil - case map[string][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[string]string: - for k, val := range v { - m[ToString(k)] = []string{val} - } - case map[string]interface{}: - for k, val := range v { - switch vt := val.(type) { - case []interface{}: - m[ToString(k)] = ToStringSlice(vt) - case []string: - m[ToString(k)] = vt - default: - m[ToString(k)] = []string{ToString(val)} - } - } - return m, nil - case map[interface{}][]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - key, err := ToStringE(k) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - value, err := ToStringSliceE(val) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - m[key] = value - } - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - return m, nil -} - -// ToStringMapBoolE casts an interface to a map[string]bool type. -func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - m := map[string]bool{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]bool: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) - } -} - -// ToStringMapE casts an interface to a map[string]interface{} type. -func ToStringMapE(i interface{}) (map[string]interface{}, error) { - m := map[string]interface{}{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = val - } - return m, nil - case map[string]interface{}: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) - } -} - -// ToStringMapIntE casts an interface to a map[string]int{} type. 
-func ToStringMapIntE(i interface{}) (map[string]int, error) { - m := map[string]int{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt(val) - } - return m, nil - case map[string]int: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToIntE(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToStringMapInt64E casts an interface to a map[string]int64{} type. -func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - m := map[string]int64{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt64(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt64(val) - } - return m, nil - case map[string]int64: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToInt64E(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToSliceE casts an interface to a []interface{} type. -func ToSliceE(i interface{}) ([]interface{}, error) { - var s []interface{} - - switch v := i.(type) { - case []interface{}: - return append(s, v...), nil - case []map[string]interface{}: - for _, u := range v { - s = append(s, u) - } - return s, nil - default: - return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) - } -} - -// ToBoolSliceE casts an interface to a []bool type. -func ToBoolSliceE(i interface{}) ([]bool, error) { - if i == nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - - switch v := i.(type) { - case []bool: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]bool, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToBoolE(s.Index(j).Interface()) - if err != nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - a[j] = val - } - return a, nil - default: - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } -} - -// ToStringSliceE casts an interface to a []string type. 
-func ToStringSliceE(i interface{}) ([]string, error) { - var a []string - - switch v := i.(type) { - case []interface{}: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []string: - return v, nil - case []int8: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case string: - return strings.Fields(v), nil - case []error: - for _, err := range i.([]error) { - a = append(a, err.Error()) - } - return a, nil - case interface{}: - str, err := ToStringE(v) - if err != nil { - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } - return []string{str}, nil - default: - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } -} - -// ToIntSliceE casts an interface to a []int type. -func ToIntSliceE(i interface{}) ([]int, error) { - if i == nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - - switch v := i.(type) { - case []int: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]int, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToIntE(s.Index(j).Interface()) - if err != nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - a[j] = val - } - return a, nil - default: - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } -} - -// ToDurationSliceE casts an interface to a []time.Duration type. -func ToDurationSliceE(i interface{}) ([]time.Duration, error) { - if i == nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - - switch v := i.(type) { - case []time.Duration: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]time.Duration, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToDurationE(s.Index(j).Interface()) - if err != nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - a[j] = val - } - return a, nil - default: - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } -} - -// StringToDate attempts to parse a string into a time.Time type using a -// predefined list of formats. If no suitable format is found, an error is -// returned. -func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, time.UTC, timeFormats) -} - -// StringToDateInDefaultLocation casts an empty interface to a time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
-func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { - return parseDateWith(s, location, timeFormats) -} - -type timeFormatType int - -const ( - timeFormatNoTimezone timeFormatType = iota - timeFormatNamedTimezone - timeFormatNumericTimezone - timeFormatNumericAndNamedTimezone - timeFormatTimeOnly -) - -type timeFormat struct { - format string - typ timeFormatType -} - -func (f timeFormat) hasTimezone() bool { - // We don't include the formats with only named timezones, see - // https://github.com/golang/go/issues/19694#issuecomment-289103522 - return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone -} - -var timeFormats = []timeFormat{ - // Keep common formats at the top. - {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, -} - -func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { - if d, e = time.Parse(format.format, s); e == nil { - - // Some time formats have a zone name, but no offset, so it gets - // put in that zone name (not the default one passed in to us), but - // without that zone's offset. So set the location manually. - if format.typ <= timeFormatNamedTimezone { - if location == nil { - location = time.Local - } - year, month, day := d.Date() - hour, min, sec := d.Clock() - d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) - } - - return - } - } - return d, fmt.Errorf("unable to parse date: %s", s) -} - -// jsonStringToObject attempts to unmarshall a string as JSON into -// the object passed as pointer. -func jsonStringToObject(s string, v interface{}) error { - data := []byte(s) - return json.Unmarshal(data, v) -} - -// toInt returns the int value of v if v or v's underlying type -// is an int. -// Note that this will return false for int64 etc. types. 
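The date-parsing table and parseDateWith move essentially verbatim into internal/time.go (shown below), but the exported behavior should be unchanged: formats without zone information are pinned to the caller-supplied location, defaulting to UTC. A sketch of that contract, assuming StringToDate and its InDefaultLocation variant remain exported after the split:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// No zone in the input: StringToDate pins the result to UTC.
	t, err := cast.StringToDate("2006-01-02 15:04:05")
	fmt.Println(t, err) // 2006-01-02 15:04:05 +0000 UTC <nil>

	// The variant pins it to the supplied location instead.
	loc := time.FixedZone("UTC+1", 3600)
	t2, _ := cast.StringToDateInDefaultLocation("2006-01-02", loc)
	fmt.Println(t2.Location()) // UTC+1
}
```
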
-func toInt(v interface{}) (int, bool) { - switch v := v.(type) { - case int: - return v, true - case time.Weekday: - return int(v), true - case time.Month: - return int(v), true - default: - return 0, false - } -} - -func trimZeroDecimal(s string) string { - var foundZero bool - for i := len(s); i > 0; i-- { - switch s[i-1] { - case '.': - if foundZero { - return s[:i-1] - } - case '0': - foundZero = true - default: - return s - } - } - return s -} diff --git a/vendor/github.com/spf13/cast/indirect.go b/vendor/github.com/spf13/cast/indirect.go new file mode 100644 index 000000000..093345f73 --- /dev/null +++ b/vendor/github.com/spf13/cast/indirect.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "reflect" +) + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirect returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil). +func indirect(i any) (any, bool) { + if i == nil { + return nil, false + } + + if t := reflect.TypeOf(i); t.Kind() != reflect.Ptr { + // Avoid creating a reflect.Value if it's not a pointer. + return i, false + } + + v := reflect.ValueOf(i) + + for v.Kind() == reflect.Ptr || (v.Kind() == reflect.Interface && v.Elem().Kind() == reflect.Ptr) { + if v.IsNil() { + return nil, true + } + + v = v.Elem() + } + + return v.Interface(), true +} diff --git a/vendor/github.com/spf13/cast/internal/time.go b/vendor/github.com/spf13/cast/internal/time.go new file mode 100644 index 000000000..906e9aece --- /dev/null +++ b/vendor/github.com/spf13/cast/internal/time.go @@ -0,0 +1,79 @@ +package internal + +import ( + "fmt" + "time" +) + +//go:generate stringer -type=TimeFormatType + +type TimeFormatType int + +const ( + TimeFormatNoTimezone TimeFormatType = iota + TimeFormatNamedTimezone + TimeFormatNumericTimezone + TimeFormatNumericAndNamedTimezone + TimeFormatTimeOnly +) + +type TimeFormat struct { + Format string + Typ TimeFormatType +} + +func (f TimeFormat) HasTimezone() bool { + // We don't include the formats with only named timezones, see + // https://github.com/golang/go/issues/19694#issuecomment-289103522 + return f.Typ >= TimeFormatNumericTimezone && f.Typ <= TimeFormatNumericAndNamedTimezone +} + +var TimeFormats = []TimeFormat{ + // Keep common formats at the top. 
+ {"2006-01-02", TimeFormatNoTimezone}, + {time.RFC3339, TimeFormatNumericTimezone}, + {"2006-01-02T15:04:05", TimeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, TimeFormatNumericTimezone}, + {time.RFC1123, TimeFormatNamedTimezone}, + {time.RFC822Z, TimeFormatNumericTimezone}, + {time.RFC822, TimeFormatNamedTimezone}, + {time.RFC850, TimeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", TimeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", TimeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", TimeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", TimeFormatNoTimezone}, + {time.ANSIC, TimeFormatNoTimezone}, + {time.UnixDate, TimeFormatNamedTimezone}, + {time.RubyDate, TimeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", TimeFormatNumericTimezone}, + {"02 Jan 2006", TimeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", TimeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", TimeFormatNumericTimezone}, + {time.Kitchen, TimeFormatTimeOnly}, + {time.Stamp, TimeFormatTimeOnly}, + {time.StampMilli, TimeFormatTimeOnly}, + {time.StampMicro, TimeFormatTimeOnly}, + {time.StampNano, TimeFormatTimeOnly}, +} + +func ParseDateWith(s string, location *time.Location, formats []TimeFormat) (d time.Time, e error) { + for _, format := range formats { + if d, e = time.Parse(format.Format, s); e == nil { + + // Some time formats have a zone name, but no offset, so it gets + // put in that zone name (not the default one passed in to us), but + // without that zone's offset. So set the location manually. + if format.Typ <= TimeFormatNamedTimezone { + if location == nil { + location = time.Local + } + year, month, day := d.Date() + hour, min, sec := d.Clock() + d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) + } + + return + } + } + return d, fmt.Errorf("unable to parse date: %s", s) +} diff --git a/vendor/github.com/spf13/cast/internal/timeformattype_string.go b/vendor/github.com/spf13/cast/internal/timeformattype_string.go new file mode 100644 index 000000000..60a29a862 --- /dev/null +++ b/vendor/github.com/spf13/cast/internal/timeformattype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=TimeFormatType"; DO NOT EDIT. + +package internal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[TimeFormatNoTimezone-0] + _ = x[TimeFormatNamedTimezone-1] + _ = x[TimeFormatNumericTimezone-2] + _ = x[TimeFormatNumericAndNamedTimezone-3] + _ = x[TimeFormatTimeOnly-4] +} + +const _TimeFormatType_name = "TimeFormatNoTimezoneTimeFormatNamedTimezoneTimeFormatNumericTimezoneTimeFormatNumericAndNamedTimezoneTimeFormatTimeOnly" + +var _TimeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} + +func (i TimeFormatType) String() string { + if i < 0 || i >= TimeFormatType(len(_TimeFormatType_index)-1) { + return "TimeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _TimeFormatType_name[_TimeFormatType_index[i]:_TimeFormatType_index[i+1]] +} diff --git a/vendor/github.com/spf13/cast/map.go b/vendor/github.com/spf13/cast/map.go new file mode 100644 index 000000000..858d4ee43 --- /dev/null +++ b/vendor/github.com/spf13/cast/map.go @@ -0,0 +1,212 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "fmt" + "reflect" +) + +func toMapE[K comparable, V any](i any, keyFn func(any) K, valFn func(any) V) (map[K]V, error) { + m := map[K]V{} + + if i == nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + + switch v := i.(type) { + case map[K]V: + return v, nil + + case map[K]any: + for k, val := range v { + m[k] = valFn(val) + } + + return m, nil + + case map[any]V: + for k, val := range v { + m[keyFn(k)] = val + } + + return m, nil + + case map[any]any: + for k, val := range v { + m[keyFn(k)] = valFn(val) + } + + return m, nil + + case string: + err := jsonStringToObject(v, &m) + return m, err + + default: + return m, fmt.Errorf(errorMsg, i, i, m) + } +} + +func toStringMapE[T any](i any, fn func(any) T) (map[string]T, error) { + return toMapE(i, ToString, fn) +} + +// ToStringMapStringE casts any value to a map[string]string type. +func ToStringMapStringE(i any) (map[string]string, error) { + return toStringMapE(i, ToString) +} + +// ToStringMapStringSliceE casts any value to a map[string][]string type. 
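map.go collapses the four near-identical map switches into one generic toMapE parameterized over key and value conversion functions; the exported ToStringMap*E functions become one-line instantiations. Call-side behavior, including the JSON-string path, looks unchanged; a quick sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// map[any]any keys and values are converted entry by entry.
	m, err := cast.ToStringMapStringE(map[any]any{"a": 1, "b": true})
	fmt.Println(m, err) // map[a:1 b:true] <nil>

	// A string is treated as a JSON object and unmarshalled.
	j, _ := cast.ToStringMapStringE(`{"k":"v"}`)
	fmt.Println(j) // map[k:v]
}
```
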
+func ToStringMapStringSliceE(i any) (map[string][]string, error) { + m := map[string][]string{} + + switch v := i.(type) { + case map[string][]string: + return v, nil + case map[string][]any: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[string]string: + for k, val := range v { + m[ToString(k)] = []string{val} + } + case map[string]any: + for k, val := range v { + switch vt := val.(type) { + case []any: + m[ToString(k)] = ToStringSlice(vt) + case []string: + m[ToString(k)] = vt + default: + m[ToString(k)] = []string{ToString(val)} + } + } + return m, nil + case map[any][]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any][]any: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any]any: + for k, val := range v { + key, err := ToStringE(k) + if err != nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + value, err := ToStringSliceE(val) + if err != nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + m[key] = value + } + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf(errorMsg, i, i, m) + } + + return m, nil +} + +// ToStringMapBoolE casts any value to a map[string]bool type. +func ToStringMapBoolE(i any) (map[string]bool, error) { + return toStringMapE(i, ToBool) +} + +// ToStringMapE casts any value to a map[string]any type. +func ToStringMapE(i any) (map[string]any, error) { + fn := func(i any) any { return i } + + return toStringMapE(i, fn) +} + +func toStringMapIntE[T int | int64](i any, fn func(any) T, fnE func(any) (T, error)) (map[string]T, error) { + m := map[string]T{} + + if i == nil { + return nil, fmt.Errorf(errorMsg, i, i, m) + } + + switch v := i.(type) { + case map[string]T: + return v, nil + + case map[string]any: + for k, val := range v { + m[k] = fn(val) + } + + return m, nil + + case map[any]T: + for k, val := range v { + m[ToString(k)] = val + } + + return m, nil + + case map[any]any: + for k, val := range v { + m[ToString(k)] = fn(val) + } + + return m, nil + + case string: + err := jsonStringToObject(v, &m) + return m, err + } + + if reflect.TypeOf(i).Kind() != reflect.Map { + return m, fmt.Errorf(errorMsg, i, i, m) + } + + mVal := reflect.ValueOf(m) + v := reflect.ValueOf(i) + + for _, keyVal := range v.MapKeys() { + val, err := fnE(v.MapIndex(keyVal).Interface()) + if err != nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + + mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) + } + + return m, nil +} + +// ToStringMapIntE casts any value to a map[string]int type. +func ToStringMapIntE(i any) (map[string]int, error) { + return toStringMapIntE(i, ToInt, ToIntE) +} + +// ToStringMapInt64E casts any value to a map[string]int64 type. +func ToStringMapInt64E(i any) (map[string]int64, error) { + return toStringMapIntE(i, ToInt64, ToInt64E) +} + +// jsonStringToObject attempts to unmarshall a string as JSON into +// the object passed as pointer. +func jsonStringToObject(s string, v any) error { + data := []byte(s) + return json.Unmarshal(data, v) +} diff --git a/vendor/github.com/spf13/cast/number.go b/vendor/github.com/spf13/cast/number.go new file mode 100644 index 000000000..a58dc4d1e --- /dev/null +++ b/vendor/github.com/spf13/cast/number.go @@ -0,0 +1,549 @@ +// Copyright © 2014 Steve Francia . 
+// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" +) + +var errNegativeNotAllowed = errors.New("unable to cast negative value") + +type float64EProvider interface { + Float64() (float64, error) +} + +type float64Provider interface { + Float64() float64 +} + +// Number is a type parameter constraint for functions accepting number types. +// +// It represents the supported number types this package can cast to. +type Number interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64 +} + +type integer interface { + int | int8 | int16 | int32 | int64 +} + +type unsigned interface { + uint | uint8 | uint16 | uint32 | uint64 +} + +type float interface { + float32 | float64 +} + +// ToNumberE casts any value to a [Number] type. +func ToNumberE[T Number](i any) (T, error) { + var t T + + switch any(t).(type) { + case int: + return toNumberE[T](i, parseNumber[T]) + case int8: + return toNumberE[T](i, parseNumber[T]) + case int16: + return toNumberE[T](i, parseNumber[T]) + case int32: + return toNumberE[T](i, parseNumber[T]) + case int64: + return toNumberE[T](i, parseNumber[T]) + case uint: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint8: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint16: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint32: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint64: + return toUnsignedNumberE[T](i, parseNumber[T]) + case float32: + return toNumberE[T](i, parseNumber[T]) + case float64: + return toNumberE[T](i, parseNumber[T]) + default: + return 0, fmt.Errorf("unknown number type: %T", t) + } +} + +// ToNumber casts any value to a [Number] type. +func ToNumber[T Number](i any) T { + v, _ := ToNumberE[T](i) + + return v +} + +// toNumber's semantics differ from other "to" functions. +// It returns false as the second parameter if the conversion fails. +// This is to signal other callers that they should proceed with their own conversions. 
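number.go introduces a Number constraint plus a single generic pair, ToNumberE/ToNumber, which dispatches to signed, unsigned, or float handling per instantiation; the old per-width ToIntE/ToUintE/ToFloat*E functions become one-liners over it (see the end of this file). A short usage sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// One generic function covers every width and signedness.
	f, err := cast.ToNumberE[float32]("1.5")
	fmt.Println(f, err) // 1.5 <nil>

	// The error-swallowing form mirrors To/ToE.
	fmt.Println(cast.ToNumber[uint16](42)) // 42
}
```
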
+func toNumber[T Number](i any) (T, bool) { + i, _ = indirect(i) + + switch s := i.(type) { + case T: + return s, true + case int: + return T(s), true + case int8: + return T(s), true + case int16: + return T(s), true + case int32: + return T(s), true + case int64: + return T(s), true + case uint: + return T(s), true + case uint8: + return T(s), true + case uint16: + return T(s), true + case uint32: + return T(s), true + case uint64: + return T(s), true + case float32: + return T(s), true + case float64: + return T(s), true + case bool: + if s { + return 1, true + } + + return 0, true + case nil: + return 0, true + case time.Weekday: + return T(s), true + case time.Month: + return T(s), true + } + + return 0, false +} + +func toNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) { + n, ok := toNumber[T](i) + if ok { + return n, nil + } + + i, _ = indirect(i) + + switch s := i.(type) { + case string: + if s == "" { + return 0, nil + } + + v, err := parseFn(s) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case json.Number: + if s == "" { + return 0, nil + } + + v, err := parseFn(string(s)) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case float64EProvider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v, err := s.Float64() + if err != nil { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + return T(v), nil + case float64Provider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + return T(s.Float64()), nil + default: + if i, ok := resolveAlias(i); ok { + return toNumberE(i, parseFn) + } + + return 0, fmt.Errorf(errorMsg, i, i, n) + } +} + +func toUnsignedNumber[T Number](i any) (T, bool, bool) { + i, _ = indirect(i) + + switch s := i.(type) { + case T: + return s, true, true + case int: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int8: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int16: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int32: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int64: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case uint: + return T(s), true, true + case uint8: + return T(s), true, true + case uint16: + return T(s), true, true + case uint32: + return T(s), true, true + case uint64: + return T(s), true, true + case float32: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case float64: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case bool: + if s { + return 1, true, true + } + + return 0, true, true + case nil: + return 0, true, true + case time.Weekday: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case time.Month: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + } + + return 0, true, false +} + +func toUnsignedNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) { + n, valid, ok := toUnsignedNumber[T](i) + if ok { + return n, nil + } + + i, _ = indirect(i) + + if !valid { + return 0, errNegativeNotAllowed + } + + switch s := i.(type) { + case string: + if s == "" { + return 0, nil + } + + v, err := parseFn(s) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case json.Number: + if s == "" { + return 0, nil + } + + v, err := parseFn(string(s)) + if err != nil { + return 0, 
fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case float64EProvider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v, err := s.Float64() + if err != nil { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + if v < 0 { + return 0, errNegativeNotAllowed + } + + return T(v), nil + case float64Provider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v := s.Float64() + + if v < 0 { + return 0, errNegativeNotAllowed + } + + return T(v), nil + default: + if i, ok := resolveAlias(i); ok { + return toUnsignedNumberE(i, parseFn) + } + + return 0, fmt.Errorf(errorMsg, i, i, n) + } +} + +func parseNumber[T Number](s string) (T, error) { + var t T + + switch any(t).(type) { + case int: + v, err := parseInt[int](s) + + return T(v), err + case int8: + v, err := parseInt[int8](s) + + return T(v), err + case int16: + v, err := parseInt[int16](s) + + return T(v), err + case int32: + v, err := parseInt[int32](s) + + return T(v), err + case int64: + v, err := parseInt[int64](s) + + return T(v), err + case uint: + v, err := parseUint[uint](s) + + return T(v), err + case uint8: + v, err := parseUint[uint8](s) + + return T(v), err + case uint16: + v, err := parseUint[uint16](s) + + return T(v), err + case uint32: + v, err := parseUint[uint32](s) + + return T(v), err + case uint64: + v, err := parseUint[uint64](s) + + return T(v), err + case float32: + v, err := strconv.ParseFloat(s, 32) + + return T(v), err + case float64: + v, err := strconv.ParseFloat(s, 64) + + return T(v), err + + default: + return 0, fmt.Errorf("unknown number type: %T", t) + } +} + +func parseInt[T integer](s string) (T, error) { + v, err := strconv.ParseInt(trimDecimal(s), 0, 0) + if err != nil { + return 0, err + } + + return T(v), nil +} + +func parseUint[T unsigned](s string) (T, error) { + v, err := strconv.ParseUint(strings.TrimLeft(trimDecimal(s), "+"), 0, 0) + if err != nil { + return 0, err + } + + return T(v), nil +} + +func parseFloat[T float](s string) (T, error) { + var t T + + var v any + var err error + + switch any(t).(type) { + case float32: + n, e := strconv.ParseFloat(s, 32) + + v = float32(n) + err = e + case float64: + n, e := strconv.ParseFloat(s, 64) + + v = float64(n) + err = e + } + + return v.(T), err +} + +// ToFloat64E casts an interface to a float64 type. +func ToFloat64E(i any) (float64, error) { + return toNumberE[float64](i, parseFloat[float64]) +} + +// ToFloat32E casts an interface to a float32 type. +func ToFloat32E(i any) (float32, error) { + return toNumberE[float32](i, parseFloat[float32]) +} + +// ToInt64E casts an interface to an int64 type. +func ToInt64E(i any) (int64, error) { + return toNumberE[int64](i, parseInt[int64]) +} + +// ToInt32E casts an interface to an int32 type. +func ToInt32E(i any) (int32, error) { + return toNumberE[int32](i, parseInt[int32]) +} + +// ToInt16E casts an interface to an int16 type. +func ToInt16E(i any) (int16, error) { + return toNumberE[int16](i, parseInt[int16]) +} + +// ToInt8E casts an interface to an int8 type. +func ToInt8E(i any) (int8, error) { + return toNumberE[int8](i, parseInt[int8]) +} + +// ToIntE casts an interface to an int type. +func ToIntE(i any) (int, error) { + return toNumberE[int](i, parseInt[int]) +} + +// ToUintE casts an interface to a uint type. +func ToUintE(i any) (uint, error) { + return toUnsignedNumberE[uint](i, parseUint[uint]) +} + +// ToUint64E casts an interface to a uint64 type. 
+func ToUint64E(i any) (uint64, error) { + return toUnsignedNumberE[uint64](i, parseUint[uint64]) +} + +// ToUint32E casts an interface to a uint32 type. +func ToUint32E(i any) (uint32, error) { + return toUnsignedNumberE[uint32](i, parseUint[uint32]) +} + +// ToUint16E casts an interface to a uint16 type. +func ToUint16E(i any) (uint16, error) { + return toUnsignedNumberE[uint16](i, parseUint[uint16]) +} + +// ToUint8E casts an interface to a uint8 type. +func ToUint8E(i any) (uint8, error) { + return toUnsignedNumberE[uint8](i, parseUint[uint8]) +} + +func trimZeroDecimal(s string) string { + var foundZero bool + for i := len(s); i > 0; i-- { + switch s[i-1] { + case '.': + if foundZero { + return s[:i-1] + } + case '0': + foundZero = true + default: + return s + } + } + return s +} + +var stringNumberRe = regexp.MustCompile(`^([-+]?\d*)(\.\d*)?$`) + +// see [BenchmarkDecimal] for details about the implementation +func trimDecimal(s string) string { + if !strings.Contains(s, ".") { + return s + } + + matches := stringNumberRe.FindStringSubmatch(s) + if matches != nil { + // matches[1] is the captured integer part with sign + s = matches[1] + + // handle special cases + switch s { + case "-", "+": + s += "0" + case "": + s = "0" + } + + return s + } + + return s +} diff --git a/vendor/github.com/spf13/cast/slice.go b/vendor/github.com/spf13/cast/slice.go new file mode 100644 index 000000000..e6a8328c6 --- /dev/null +++ b/vendor/github.com/spf13/cast/slice.go @@ -0,0 +1,106 @@ +// Copyright © 2014 Steve Francia <spf@spf13.com>. +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "fmt" + "reflect" + "strings" +) + +// ToSliceE casts any value to a []any type. +func ToSliceE(i any) ([]any, error) { + i, _ = indirect(i) + + var s []any + + switch v := i.(type) { + case []any: + // TODO: use slices.Clone + return append(s, v...), nil + case []map[string]any: + for _, u := range v { + s = append(s, u) + } + + return s, nil + default: + return s, fmt.Errorf(errorMsg, i, i, s) + } +} + +func toSliceE[T Basic](i any) ([]T, error) { + v, ok, err := toSliceEOk[T](i) + if err != nil { + return nil, err + } + + if !ok { + return nil, fmt.Errorf(errorMsg, i, i, []T{}) + } + + return v, nil +} + +func toSliceEOk[T Basic](i any) ([]T, bool, error) { + i, _ = indirect(i) + if i == nil { + return nil, true, fmt.Errorf(errorMsg, i, i, []T{}) + } + + switch v := i.(type) { + case []T: + // TODO: clone slice + return v, true, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]T, s.Len()) + + for j := 0; j < s.Len(); j++ { + val, err := ToE[T](s.Index(j).Interface()) + if err != nil { + return nil, true, fmt.Errorf(errorMsg, i, i, []T{}) + } + + a[j] = val + } + + return a, true, nil + default: + return nil, false, nil + } +}
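A quick illustration of what the generic `toSliceE` path above enables. This sketch is not part of the vendored code; it only assumes the cast API shown in this diff, and the slice literals are made-up example values:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// The reflection fallback in toSliceEOk converts element by element
	// via ToE[int], so mixed []any inputs work as long as every element converts.
	ints, err := cast.ToIntSliceE([]any{1, "2", 3.0})
	fmt.Println(ints, err) // [1 2 3] <nil>

	// Negative elements are rejected for unsigned element types
	// (errNegativeNotAllowed surfaces as a conversion error).
	_, err = cast.ToUintSliceE([]int{-1})
	fmt.Println(err != nil) // true
}
```

+ +// ToStringSliceE casts any value to a []string type.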
+func ToStringSliceE(i any) ([]string, error) { + if a, ok, err := toSliceEOk[string](i); ok { + if err != nil { + return nil, err + } + + return a, nil + } + + var a []string + + switch v := i.(type) { + case string: + return strings.Fields(v), nil + case any: + str, err := ToStringE(v) + if err != nil { + return nil, fmt.Errorf(errorMsg, i, i, a) + } + + return []string{str}, nil + default: + return nil, fmt.Errorf(errorMsg, i, i, a) + } +} diff --git a/vendor/github.com/spf13/cast/time.go b/vendor/github.com/spf13/cast/time.go new file mode 100644 index 000000000..744cd5acc --- /dev/null +++ b/vendor/github.com/spf13/cast/time.go @@ -0,0 +1,116 @@ +// Copyright © 2014 Steve Francia <spf@spf13.com>. +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/spf13/cast/internal" +) + +// ToTimeE casts any value to a [time.Time] type. +func ToTimeE(i any) (time.Time, error) { + return ToTimeInDefaultLocationE(i, time.UTC) +} + +// ToTimeInDefaultLocationE casts an empty interface to [time.Time], +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. +func ToTimeInDefaultLocationE(i any, location *time.Location) (tim time.Time, err error) { + i, _ = indirect(i) + + switch v := i.(type) { + case time.Time: + return v, nil + case string: + return StringToDateInDefaultLocation(v, location) + case json.Number: + // Originally this used ToInt64E, but adding string float conversion broke ToTime. + // The behavior of ToTime would have changed if we continued using it. + // For now, using json.Number's own Int64 method should be good enough to preserve backwards compatibility. + v = json.Number(trimZeroDecimal(string(v))) + s, err1 := v.Int64() + if err1 != nil { + return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{}) + } + return time.Unix(s, 0), nil + case int: + return time.Unix(int64(v), 0), nil + case int32: + return time.Unix(int64(v), 0), nil + case int64: + return time.Unix(v, 0), nil + case uint: + return time.Unix(int64(v), 0), nil + case uint32: + return time.Unix(int64(v), 0), nil + case uint64: + return time.Unix(int64(v), 0), nil + case nil: + return time.Time{}, nil + default: + return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{}) + } +}
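The `json.Number` branch above is subtle: it deliberately parses through `Int64` (after `trimZeroDecimal`) rather than the float-accepting integer converters, to keep Unix-timestamp semantics stable. A minimal sketch of the observable behavior; it is illustrative only and assumes the vendored cast API in this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// Integer inputs are treated as Unix timestamps.
	t, err := cast.ToTimeE(int64(1700000000))
	fmt.Println(t.UTC(), err) // 2023-11-14 22:13:20 +0000 UTC <nil>

	// Strings without timezone info can be pinned to an explicit location.
	loc, _ := time.LoadLocation("Europe/Paris")
	t, err = cast.ToTimeInDefaultLocationE("2024-01-02 15:04:05", loc)
	fmt.Println(t, err) // 2024-01-02 15:04:05 +0100 CET <nil>
}
```

+ +// ToDurationE casts any value to a [time.Duration] type.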
+func ToDurationE(i any) (time.Duration, error) { + i, _ = indirect(i) + + switch s := i.(type) { + case time.Duration: + return s, nil + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + v, err := ToInt64E(s) + if err != nil { + // TODO: once there is better error handling, this should be easier + return 0, errors.New(strings.ReplaceAll(err.Error(), " int64", "time.Duration")) + } + + return time.Duration(v), nil + case float32, float64, float64EProvider, float64Provider: + v, err := ToFloat64E(s) + if err != nil { + // TODO: once there is better error handling, this should be easier + return 0, errors.New(strings.ReplaceAll(err.Error(), " float64", "time.Duration")) + } + + return time.Duration(v), nil + case string: + if !strings.ContainsAny(s, "nsuµmh") { + return time.ParseDuration(s + "ns") + } + + return time.ParseDuration(s) + case nil: + return time.Duration(0), nil + default: + if i, ok := resolveAlias(i); ok { + return ToDurationE(i) + } + + return 0, fmt.Errorf(errorMsg, i, i, time.Duration(0)) + } +} + +// StringToDate attempts to parse a string into a [time.Time] type using a +// predefined list of formats. +// +// If no suitable format is found, an error is returned. +func StringToDate(s string) (time.Time, error) { + return internal.ParseDateWith(s, time.UTC, internal.TimeFormats) +} + +// StringToDateInDefaultLocation parses a string into a [time.Time] type, +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. +func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { + return internal.ParseDateWith(s, location, internal.TimeFormats) +} diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go deleted file mode 100644 index 1524fc82c..000000000 --- a/vendor/github.com/spf13/cast/timeformattype_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. - -package cast - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[timeFormatNoTimezone-0] - _ = x[timeFormatNamedTimezone-1] - _ = x[timeFormatNumericTimezone-2] - _ = x[timeFormatNumericAndNamedTimezone-3] - _ = x[timeFormatTimeOnly-4] -} - -const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" - -var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} - -func (i timeFormatType) String() string { - if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { - return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] -} diff --git a/vendor/github.com/spf13/cast/zz_generated.go b/vendor/github.com/spf13/cast/zz_generated.go new file mode 100644 index 000000000..ce3ec0f78 --- /dev/null +++ b/vendor/github.com/spf13/cast/zz_generated.go @@ -0,0 +1,261 @@ +// Code generated by cast generator. DO NOT EDIT. + +package cast + +import "time" + +// ToBool casts any value to a(n) bool type. +func ToBool(i any) bool { + v, _ := ToBoolE(i) + return v +}
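One behavior in `ToDurationE` above is easy to miss: a bare numeric string containing no unit rune from `"nsuµmh"` is parsed as nanoseconds, matching the integer and float branches. A hedged sketch, assuming the vendored cast API in this diff:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	d, _ := cast.ToDurationE("5s") // unit present: handed to time.ParseDuration
	fmt.Println(d)                 // 5s

	d, _ = cast.ToDurationE("5") // no unit rune: parsed as "5ns"
	fmt.Println(d)               // 5ns

	d, _ = cast.ToDurationE(1500) // plain numbers are taken as nanoseconds too
	fmt.Println(d)                // 1.5µs
}
```

+ +// ToString casts any value to a(n) string type.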
+func ToString(i any) string { + v, _ := ToStringE(i) + return v +} + +// ToTime casts any value to a(n) time.Time type. +func ToTime(i any) time.Time { + v, _ := ToTimeE(i) + return v +} + +// ToTimeInDefaultLocation casts any value to a(n) time.Time type. +func ToTimeInDefaultLocation(i any, location *time.Location) time.Time { + v, _ := ToTimeInDefaultLocationE(i, location) + return v +} + +// ToDuration casts any value to a(n) time.Duration type. +func ToDuration(i any) time.Duration { + v, _ := ToDurationE(i) + return v +} + +// ToInt casts any value to a(n) int type. +func ToInt(i any) int { + v, _ := ToIntE(i) + return v +} + +// ToInt8 casts any value to a(n) int8 type. +func ToInt8(i any) int8 { + v, _ := ToInt8E(i) + return v +} + +// ToInt16 casts any value to a(n) int16 type. +func ToInt16(i any) int16 { + v, _ := ToInt16E(i) + return v +} + +// ToInt32 casts any value to a(n) int32 type. +func ToInt32(i any) int32 { + v, _ := ToInt32E(i) + return v +} + +// ToInt64 casts any value to a(n) int64 type. +func ToInt64(i any) int64 { + v, _ := ToInt64E(i) + return v +} + +// ToUint casts any value to a(n) uint type. +func ToUint(i any) uint { + v, _ := ToUintE(i) + return v +} + +// ToUint8 casts any value to a(n) uint8 type. +func ToUint8(i any) uint8 { + v, _ := ToUint8E(i) + return v +} + +// ToUint16 casts any value to a(n) uint16 type. +func ToUint16(i any) uint16 { + v, _ := ToUint16E(i) + return v +} + +// ToUint32 casts any value to a(n) uint32 type. +func ToUint32(i any) uint32 { + v, _ := ToUint32E(i) + return v +} + +// ToUint64 casts any value to a(n) uint64 type. +func ToUint64(i any) uint64 { + v, _ := ToUint64E(i) + return v +} + +// ToFloat32 casts any value to a(n) float32 type. +func ToFloat32(i any) float32 { + v, _ := ToFloat32E(i) + return v +} + +// ToFloat64 casts any value to a(n) float64 type. +func ToFloat64(i any) float64 { + v, _ := ToFloat64E(i) + return v +} + +// ToStringMapString casts any value to a(n) map[string]string type. +func ToStringMapString(i any) map[string]string { + v, _ := ToStringMapStringE(i) + return v +} + +// ToStringMapStringSlice casts any value to a(n) map[string][]string type. +func ToStringMapStringSlice(i any) map[string][]string { + v, _ := ToStringMapStringSliceE(i) + return v +} + +// ToStringMapBool casts any value to a(n) map[string]bool type. +func ToStringMapBool(i any) map[string]bool { + v, _ := ToStringMapBoolE(i) + return v +} + +// ToStringMapInt casts any value to a(n) map[string]int type. +func ToStringMapInt(i any) map[string]int { + v, _ := ToStringMapIntE(i) + return v +} + +// ToStringMapInt64 casts any value to a(n) map[string]int64 type. +func ToStringMapInt64(i any) map[string]int64 { + v, _ := ToStringMapInt64E(i) + return v +} + +// ToStringMap casts any value to a(n) map[string]any type. +func ToStringMap(i any) map[string]any { + v, _ := ToStringMapE(i) + return v +} + +// ToSlice casts any value to a(n) []any type. +func ToSlice(i any) []any { + v, _ := ToSliceE(i) + return v +} + +// ToBoolSlice casts any value to a(n) []bool type. +func ToBoolSlice(i any) []bool { + v, _ := ToBoolSliceE(i) + return v +} + +// ToStringSlice casts any value to a(n) []string type. +func ToStringSlice(i any) []string { + v, _ := ToStringSliceE(i) + return v +} + +// ToIntSlice casts any value to a(n) []int type. +func ToIntSlice(i any) []int { + v, _ := ToIntSliceE(i) + return v +} + +// ToInt64Slice casts any value to a(n) []int64 type. 
+func ToInt64Slice(i any) []int64 { + v, _ := ToInt64SliceE(i) + return v +} + +// ToUintSlice casts any value to a(n) []uint type. +func ToUintSlice(i any) []uint { + v, _ := ToUintSliceE(i) + return v +} + +// ToFloat64Slice casts any value to a(n) []float64 type. +func ToFloat64Slice(i any) []float64 { + v, _ := ToFloat64SliceE(i) + return v +} + +// ToDurationSlice casts any value to a(n) []time.Duration type. +func ToDurationSlice(i any) []time.Duration { + v, _ := ToDurationSliceE(i) + return v +} + +// ToBoolSliceE casts any value to a(n) []bool type. +func ToBoolSliceE(i any) ([]bool, error) { + return toSliceE[bool](i) +} + +// ToDurationSliceE casts any value to a(n) []time.Duration type. +func ToDurationSliceE(i any) ([]time.Duration, error) { + return toSliceE[time.Duration](i) +} + +// ToIntSliceE casts any value to a(n) []int type. +func ToIntSliceE(i any) ([]int, error) { + return toSliceE[int](i) +} + +// ToInt8SliceE casts any value to a(n) []int8 type. +func ToInt8SliceE(i any) ([]int8, error) { + return toSliceE[int8](i) +} + +// ToInt16SliceE casts any value to a(n) []int16 type. +func ToInt16SliceE(i any) ([]int16, error) { + return toSliceE[int16](i) +} + +// ToInt32SliceE casts any value to a(n) []int32 type. +func ToInt32SliceE(i any) ([]int32, error) { + return toSliceE[int32](i) +} + +// ToInt64SliceE casts any value to a(n) []int64 type. +func ToInt64SliceE(i any) ([]int64, error) { + return toSliceE[int64](i) +} + +// ToUintSliceE casts any value to a(n) []uint type. +func ToUintSliceE(i any) ([]uint, error) { + return toSliceE[uint](i) +} + +// ToUint8SliceE casts any value to a(n) []uint8 type. +func ToUint8SliceE(i any) ([]uint8, error) { + return toSliceE[uint8](i) +} + +// ToUint16SliceE casts any value to a(n) []uint16 type. +func ToUint16SliceE(i any) ([]uint16, error) { + return toSliceE[uint16](i) +} + +// ToUint32SliceE casts any value to a(n) []uint32 type. +func ToUint32SliceE(i any) ([]uint32, error) { + return toSliceE[uint32](i) +} + +// ToUint64SliceE casts any value to a(n) []uint64 type. +func ToUint64SliceE(i any) ([]uint64, error) { + return toSliceE[uint64](i) +} + +// ToFloat32SliceE casts any value to a(n) []float32 type. +func ToFloat32SliceE(i any) ([]float32, error) { + return toSliceE[float32](i) +} + +// ToFloat64SliceE casts any value to a(n) []float64 type. +func ToFloat64SliceE(i any) ([]float64, error) { + return toSliceE[float64](i) +} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2c8f4808c..6acf8ab1e 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -12,14 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +version: "2" + run: - deadline: 5m + timeout: 5m + +formatters: + enable: + - gofmt + - goimports linters: - disable-all: true + default: none enable: #- bodyclose - # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -30,28 +36,24 @@ linters: - goconst - gocritic #- gocyclo - - gofmt - - goimports - #- gomnd #- goprintffuncname - gosec - - gosimple - govet - ineffassign #- lll - misspell + #- mnd #- nakedret #- noctx - nolintlint #- rowserrcheck - #- scopelint - staticcheck - #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - - stylecheck - #- typecheck - unconvert #- unparam - unused - # - varcheck ! 
deprecated since v1.49.0; replaced by 'unused' #- whitespace - fast: false + exclusions: + presets: + - common-false-positives + - legacy + - std-error-handling diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 71757151c..8416275f4 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,8 +1,14 @@ - -![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) +
+<div align="center"> +<img alt="cobra-logo" src="..."> +</div> Cobra is a library for creating powerful modern CLI applications. +Visit <a href="https://cobra.dev">Cobra.dev</a> for extensive documentation + + Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. @@ -11,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) + +<p align="center"> + Supported by: +</p> + +<a href="https://www.warp.dev/cobra"> + <img alt="Warp sponsorship" src="..."> +</a> + +### [Warp, the AI terminal for devs](https://www.warp.dev/cobra) +[Try Cobra in Warp today](https://www.warp.dev/cobra) + +
# Overview diff --git a/vendor/github.com/spf13/cobra/SECURITY.md b/vendor/github.com/spf13/cobra/SECURITY.md new file mode 100644 index 000000000..54e60c28c --- /dev/null +++ b/vendor/github.com/spf13/cobra/SECURITY.md @@ -0,0 +1,105 @@ +# Security Policy + +## Reporting a Vulnerability + +The `cobra` maintainers take security issues seriously and +we appreciate your efforts to _**responsibly**_ disclose your findings. +We will make every effort to swiftly respond and address concerns. + +To report a security vulnerability: + +1. **DO NOT** create a public GitHub issue for the vulnerability! +2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability! +3. Send an email to `cobra-security@googlegroups.com`. +4. Include the following details in your report: + - Description of the vulnerability + - Steps to reproduce + - Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.) + - Any potential mitigations you've already identified +5. Allow up to 7 days for an initial response. + You should receive an acknowledgment of your report and an estimated timeline for a fix. +6. (Optional) If you have a fix and would like to contribute your patch, please work + directly with the maintainers via `cobra-security@googlegroups.com` to + coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change. + +## Response Process + +When a security vulnerability report is received, the `cobra` maintainers will: + +1. Confirm receipt of the vulnerability report within 7 days. +2. Assess the report to determine if it constitutes a security vulnerability. +3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it. +4. Develop and test a fix. +5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter. +6. Create a new GitHub Security Advisory to inform the broader Go ecosystem + +## Disclosure Policy + +The `cobra` maintainers follow a coordinated disclosure process: + +1. Security vulnerabilities will be addressed as quickly as possible. +2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities + that are within `cobra` itself. +3. Once a fix is ready, the maintainers will: + - Release a new version containing the fix. + - Update the security advisory with details about the vulnerability. + - Credit the reporter (unless they wish to remain anonymous). + - Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter). + - Announce the vulnerability through appropriate channels + (GitHub Security Advisory, mailing lists, GitHub Releases, etc.) + +## Supported Versions + +Security fixes will typically only be released for the most recent major release. + +## Upstream Security Issues + +`cobra` generally will not accept vulnerability reports that originate in upstream +dependencies. I.e., if there is a problem in Go code that `cobra` depends on, +it is best to engage that project's maintainers and owners. + +This security policy primarily pertains only to `cobra` itself but if you believe you've +identified a problem that originates in an upstream dependency and is being widely +distributed by `cobra`, please follow the disclosure procedure above: the `cobra` +maintainers will work with you to determine the severity and ecosystem impact. 
+ +## Security Updates and CVEs + +Information about known security vulnerabilities and CVEs affecting `cobra` will +be published as GitHub Security Advisories at +https://github.com/spf13/cobra/security/advisories. + +All users are encouraged to watch the repository and upgrade promptly when +security releases are published. + +## `cobra` Security Best Practices for Users + +When using `cobra` in your CLIs, the `cobra` maintainers recommend the following: + +1. Always use the latest version of `cobra`. +2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management. +3. Always use the latest possible version of Go. + +## Security Best Practices for Contributors + +When contributing to `cobra`: + +1. Be mindful of security implications when adding new features or modifying existing ones. +2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI + (like Kubernetes, Docker, Prometheus, etc. etc.) +3. Write tests that explicitly cover edge cases and potential issues. +4. If you discover a security issue while working on `cobra`, please report it + following the process above rather than opening a public pull request or issue that + addresses the vulnerability. +5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa), + [sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification), + etc. + +## Acknowledgments + +The `cobra` maintainers would like to thank all security researchers and +community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!! + +--- + +*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.* diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index dbb2c298b..78088db69 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -39,7 +39,7 @@ const ( ) // FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist +type FParseErrWhitelist flag.ParseErrorsAllowlist // Group Structure to manage groups for commands type Group struct { @@ -1296,6 +1296,11 @@ Simply type ` + c.DisplayName() + ` help [path to command] for full details.`, c.Printf("Unknown help topic %#q\n", args) CheckErr(c.Root().Usage()) } else { + // Flow the context down to be used in help text + if cmd.ctx == nil { + cmd.ctx = c.ctx + } + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown CheckErr(cmd.Help()) @@ -1872,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error { c.mergePersistentFlags() // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist) err := c.Flags().Parse(args) // Print warnings if they occurred (e.g. deprecated flag messages).
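The two `command.go` hunks above are behavior-preserving for callers: `FParseErrWhitelist` keeps its name and shape, and only the underlying pflag type is renamed. A minimal usage sketch (illustrative only, not taken from this diff; the command and flag names are invented):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "app",
		// Tolerate flags this command does not define; cobra copies this
		// setting into pflag's ParseErrorsAllowlist just before parsing.
		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("remaining args:", args)
		},
	}

	cmd.SetArgs([]string{"--no-such-flag", "positional"})
	_ = cmd.Execute()
}
```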
@@ -2020,7 +2025,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) } if c.HasHelpSubCommands() { - fmt.Fprintf(w, "\n\nAdditional help topcis:") + fmt.Fprintf(w, "\n\nAdditional help topics:") for _, subcmd := range c.Commands() { if subcmd.IsAdditionalHelpTopicCommand() { fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index a1752f763..d3607c2d2 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -115,6 +115,13 @@ type CompletionOptions struct { DisableDescriptions bool // HiddenDefaultCmd makes the default 'completion' command hidden HiddenDefaultCmd bool + // DefaultShellCompDirective sets the ShellCompDirective that is returned + // if no special directive can be determined + DefaultShellCompDirective *ShellCompDirective +} + +func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) { + receiver.DefaultShellCompDirective = &directive } // Completion is a string that can be used for completions @@ -375,7 +382,7 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo // Error while attempting to parse flags if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + if _, ok := flagErr.(*flagCompError); !ok || flagCompletion { return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } @@ -480,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo } } else { directive = ShellCompDirectiveDefault + // check current and parent commands for a custom DefaultShellCompDirective + for cmd := finalCmd; cmd != nil; cmd = cmd.parent { + if cmd.CompletionOptions.DefaultShellCompDirective != nil { + directive = *cmd.CompletionOptions.DefaultShellCompDirective + break + } + } + if flag == nil { foundLocalNonPersistentFlag := false // If TraverseChildren is true on the root command we don't check for @@ -773,7 +788,7 @@ See each sub-command's help for details on how to use the generated script. // shell completion for it (prog __complete completion '') subCmd, cmdArgs, err := c.Find(args) if err != nil || subCmd.Name() != compCmdName && - !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) { + (subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) { // The completion command is not being called or being completed so we remove it. 
c.RemoveCommand(completionCmd) return diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index d4dfbc5ea..2fd3c5759 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -137,12 +137,17 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// ParseErrorsWhitelist defines the parsing errors that can be ignored. +// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release. +type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -158,8 +163,13 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // ParseErrorsWhitelist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -928,7 +938,6 @@ func VarP(value Value, name, shorthand, usage string) { // returns the error. func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err } @@ -986,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... // we do not want to lose arg in this case if len(split) >= 2 { @@ -1044,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...'
// we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1158,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true + f.args = make([]string, 0, len(arguments)) + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1174,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1200,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index f563907e2..e62eab538 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -8,6 +8,7 @@ import ( goflag "flag" "reflect" "strings" + "time" ) // go test flags prefixes @@ -113,6 +114,38 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + // ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(), // since by default those are skipped by pflag.Parse(). 
// Typical usage example: `ParseSkippedFlags(os.Args[1:], goflag.CommandLine)` @@ -125,3 +158,4 @@ func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error { } return goFlagSet.Parse(skippedFlags) } + diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go index 890a01afc..1d1e3bf91 100644 --- a/vendor/github.com/spf13/pflag/string_to_string.go +++ b/vendor/github.com/spf13/pflag/string_to_string.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/csv" "fmt" + "sort" "strings" ) @@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string { } func (s *stringToStringValue) String() string { + keys := make([]string, 0, len(*s.value)) + for k := range *s.value { + keys = append(keys, k) + } + sort.Strings(keys) + records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { + for _, k := range keys { + v := (*s.value)[k] records = append(records, k+"="+v) } diff --git a/vendor/github.com/spf13/pflag/time.go b/vendor/github.com/spf13/pflag/time.go index dc024807e..3dee42479 100644 --- a/vendor/github.com/spf13/pflag/time.go +++ b/vendor/github.com/spf13/pflag/time.go @@ -48,7 +48,13 @@ func (d *timeValue) Type() string { return "time" } -func (d *timeValue) String() string { return d.Time.Format(time.RFC3339Nano) } +func (d *timeValue) String() string { + if d.Time.IsZero() { + return "" + } else { + return d.Time.Format(time.RFC3339Nano) + } +} // GetTime return the time value of a flag with the given name func (f *FlagSet) GetTime(name string) (time.Time, error) { diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig index 1f664d13a..faef0c91e 100644 --- a/vendor/github.com/spf13/viper/.editorconfig +++ b/vendor/github.com/spf13/viper/.editorconfig @@ -16,3 +16,6 @@ indent_style = tab [*.nix] indent_size = 2 + +[.golangci.yaml] +indent_size = 2 diff --git a/vendor/github.com/spf13/viper/.golangci.yaml b/vendor/github.com/spf13/viper/.golangci.yaml index 474f41633..bed0b83ec 100644 --- a/vendor/github.com/spf13/viper/.golangci.yaml +++ b/vendor/github.com/spf13/viper/.golangci.yaml @@ -1,105 +1,118 @@ -run: - timeout: 5m +version: "2" -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/spf13/viper) - gocritic: - # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage.
- enabled-tags: - - diagnostic - - experimental - - opinionated - - style - disabled-checks: - - importShadow - - unnamedResult - goimports: - local-prefixes: github.com/spf13/viper +run: + timeout: 5m linters: - disable-all: true - enable: - - bodyclose - - dogsled - - dupl - - durationcheck - - exhaustive - - gci - - gocritic - - godot - - gofmt - - gofumpt - - goimports - - gomoddirectives - - goprintffuncname - - govet - - importas - - ineffassign - - makezero - - misspell - - nakedret - - nilerr - - noctx - - nolintlint - - prealloc - - predeclared - - revive - - rowserrcheck - - sqlclosecheck - - staticcheck - - stylecheck - - tparallel - - typecheck - - unconvert - - unparam - - unused - - wastedassign - - whitespace + enable: + - bodyclose + - dogsled + - dupl + - durationcheck + - exhaustive + - gocritic + - godot + - gomoddirectives + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - tparallel + - unconvert + - unparam + - unused + - wastedassign + - whitespace - # fixme - # - cyclop - # - errcheck - # - errorlint - # - exhaustivestruct - # - forbidigo - # - forcetypeassert - # - gochecknoglobals - # - gochecknoinits - # - gocognit - # - goconst - # - gocyclo - # - gosec - # - gosimple - # - ifshort - # - lll - # - nlreturn - # - paralleltest - # - scopelint - # - thelper - # - wrapcheck + # fixme + # - cyclop + # - errcheck + # - errorlint + # - exhaustivestruct + # - forbidigo + # - forcetypeassert + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - goconst + # - gocyclo + # - gosec + # - gosimple + # - ifshort + # - lll + # - nlreturn + # - paralleltest + # - scopelint + # - thelper + # - wrapcheck - # unused - # - depguard - # - goheader - # - gomodguard + # unused + # - depguard + # - goheader + # - gomodguard - # deprecated - # - deadcode - # - structcheck - # - varcheck + # don't enable: + # - asciicheck + # - funlen + # - godox + # - goerr113 + # - gomnd + # - interfacer + # - maligned + # - nestif + # - testpackage + # - wsl - # don't enable: - # - asciicheck - # - funlen - # - godox - # - goerr113 - # - gomnd - # - interfacer - # - maligned - # - nestif - # - testpackage - # - wsl + exclusions: + rules: + - linters: + - errcheck + - noctx + path: _test.go + presets: + - comments + - std-error-handling + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + gocritic: + # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage. 
+ enabled-tags: + - diagnostic + - experimental + - opinionated + - style + disabled-checks: + - importShadow + - unnamedResult + +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + # - golines + + settings: + gci: + sections: + - standard + - default + - localmodule diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index 769a5d900..7a4c0fc30 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -12,7 +12,7 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.21-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -821,7 +821,7 @@ You can use your favorite format's marshaller with the config returned by `AllSettings()` ```go import ( - yaml "gopkg.in/yaml.v2" + yaml "go.yaml.in/yaml/v3" // ... ) diff --git a/vendor/github.com/spf13/viper/UPDATES.md b/vendor/github.com/spf13/viper/UPGRADE.md similarity index 79% rename from vendor/github.com/spf13/viper/UPDATES.md rename to vendor/github.com/spf13/viper/UPGRADE.md index ccf413ed7..a33c965a4 100644 --- a/vendor/github.com/spf13/viper/UPDATES.md +++ b/vendor/github.com/spf13/viper/UPGRADE.md @@ -83,6 +83,27 @@ v := viper.NewWithOptions( ) ``` +### BREAKING: "github.com/mitchellh/mapstructure" dependency replaced + +The original [mapstructure](https://github.com/mitchellh/mapstructure) has been [archived](https://github.com/mitchellh/mapstructure/issues/349) and was replaced with a [fork](https://github.com/go-viper/mapstructure) maintained by Viper ([#1723](https://github.com/spf13/viper/pull/1723)). + +As a result, the package import path needs to be changed in cases where `mapstructure` is directly referenced in your code.
+ +For example, when providing a custom decoder config: + +```go +err := viper.Unmarshal(&appConfig, func(config *mapstructure.DecoderConfig) { + config.TagName = "yaml" +}) +``` + +The change is fairly straightforward, just replace all occurrences of the import path `github.com/mitchellh/mapstructure` with `github.com/go-viper/mapstructure/v2`: + +```diff +- import "github.com/mitchellh/mapstructure" ++ import "github.com/go-viper/mapstructure/v2" +``` + ### BREAKING: HCL, Java properties, INI removed from core In order to reduce third-party dependencies, Viper dropped support for the following formats from the core: diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock index d76dfbddd..0b8cfb5a8 100644 --- a/vendor/github.com/spf13/viper/flake.lock +++ b/vendor/github.com/spf13/viper/flake.lock @@ -2,30 +2,32 @@ "nodes": { "cachix": { "inputs": { - "devenv": "devenv_2", + "devenv": [ + "devenv" + ], "flake-compat": [ + "devenv" + ], + "git-hooks": [ "devenv", - "flake-compat" + "git-hooks" ], "nixpkgs": [ "devenv", "nixpkgs" - ], - "pre-commit-hooks": [ - "devenv", - "pre-commit-hooks" ] }, "locked": { - "lastModified": 1712055811, - "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "lastModified": 1748883665, + "narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=", "owner": "cachix", "repo": "cachix", - "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "rev": "f707778d902af4d62d8dd92c269f8e70de09acbe", "type": "github" }, "original": { "owner": "cachix", + "ref": "latest", "repo": "cachix", "type": "github" } @@ -33,52 +35,21 @@ "devenv": { "inputs": { "cachix": "cachix", - "flake-compat": "flake-compat_2", - "nix": "nix_2", - "nixpkgs": "nixpkgs_2", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1724763216, - "narHash": "sha256-oW2bwCrJpIzibCNK6zfIDaIQw765yMAuMSG2gyZfGv0=", - "owner": "cachix", - "repo": "devenv", - "rev": "1e4ef61205b9aa20fe04bf1c468b6a316281c4f1", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_2": { - "inputs": { - "flake-compat": [ - "devenv", - "cachix", - "flake-compat" - ], + "flake-compat": "flake-compat", + "git-hooks": "git-hooks", "nix": "nix", - "nixpkgs": "nixpkgs", - "poetry2nix": "poetry2nix", - "pre-commit-hooks": [ - "devenv", - "cachix", - "pre-commit-hooks" - ] + "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1708704632, - "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "lastModified": 1755257397, + "narHash": "sha256-VU+OHexL2y6y7yrpEc6bZvYYwoQg6aZK1b4YxT0yZCk=", "owner": "cachix", "repo": "devenv", - "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "rev": "6f9c3d4722aa253631644329f7bda60b1d3d1b97", "type": "github" }, "original": { "owner": "cachix", - "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -86,27 +57,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1747046372, + "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_2": { - 
"flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", "type": "github" }, "original": { @@ -117,14 +72,18 @@ }, "flake-parts": { "inputs": { - "nixpkgs-lib": "nixpkgs-lib" + "nixpkgs-lib": [ + "devenv", + "nix", + "nixpkgs" + ] }, "locked": { - "lastModified": 1722555600, - "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", + "lastModified": 1733312601, + "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", + "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", "type": "github" }, "original": { @@ -133,39 +92,47 @@ "type": "github" } }, - "flake-utils": { + "flake-parts_2": { "inputs": { - "systems": "systems" + "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "lastModified": 1754487366, + "narHash": "sha256-pHYj8gUBapuUzKV/kN/tR3Zvqc7o6gdFB9XKXIp1SQ8=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "af66ad14b28a127c5c0f3bbb298218fc63528a18", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "hercules-ci", + "repo": "flake-parts", "type": "github" } }, - "flake-utils_2": { + "git-hooks": { "inputs": { - "systems": "systems_2" + "flake-compat": [ + "devenv", + "flake-compat" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ] }, "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "lastModified": 1750779888, + "narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "cachix", + "repo": "git-hooks.nix", "type": "github" } }, @@ -173,7 +140,7 @@ "inputs": { "nixpkgs": [ "devenv", - "pre-commit-hooks", + "git-hooks", "nixpkgs" ] }, @@ -192,165 +159,49 @@ } }, "nix": { - "inputs": { - "flake-compat": "flake-compat", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "poetry2nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688870561, - "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, - "nix_2": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], + 
"flake-parts": "flake-parts", + "git-hooks-nix": [ + "devenv", + "git-hooks" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression_2" + "nixpkgs-23-11": [ + "devenv" + ], + "nixpkgs-regression": [ + "devenv" + ] }, "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", + "lastModified": 1755029779, + "narHash": "sha256-3+GHIYGg4U9XKUN4rg473frIVNn8YD06bjwxKS1IPrU=", + "owner": "cachix", "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "rev": "b0972b0eee6726081d10b1199f54de6d2917f861", "type": "github" }, "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", + "owner": "cachix", + "ref": "devenv-2.30", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1692808169, - "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1722555339, - "narHash": "sha256-uFf2QeW7eAHlYXuDktm9c25OxOyCoUOQmh5SZ9amE5Q=", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" - }, - "original": { - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_2": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1710695816, - "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "614b4613980a522ba49f0d194531beddbb7220d3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1713361204, - "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "lastModified": 1750441195, + "narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=", "owner": "cachix", "repo": "devenv-nixpkgs", - "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "rev": "0ceffe312871b443929ff3006960d29b120dc627", "type": "github" }, "original": { @@ -360,110 +211,42 @@ "type": "github" } }, - "nixpkgs_3": { - "locked": { - "lastModified": 1724748588, - "narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - 
"poetry2nix": { - "inputs": { - "flake-utils": "flake-utils", - "nix-github-actions": "nix-github-actions", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ] - }, + "nixpkgs-lib": { "locked": { - "lastModified": 1692876271, - "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "lastModified": 1753579242, + "narHash": "sha256-zvaMGVn14/Zz8hnp4VWT9xVnhc8vuL3TStRqwk22biA=", "owner": "nix-community", - "repo": "poetry2nix", - "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "repo": "nixpkgs.lib", + "rev": "0f36c44e01a6129be94e3ade315a5883f0228a6e", "type": "github" }, "original": { "owner": "nix-community", - "repo": "poetry2nix", + "repo": "nixpkgs.lib", "type": "github" } }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils_2", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, + "nixpkgs_2": { "locked": { - "lastModified": 1713775815, - "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "lastModified": 1755268003, + "narHash": "sha256-nNaeJjo861wFR0tjHDyCnHs1rbRtrMgxAKMoig9Sj/w=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "32f313e49e42f715491e1ea7b306a87c16fe0388", "type": "github" }, "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", "type": "github" } }, "root": { "inputs": { "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_3" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" + "flake-parts": "flake-parts_2", + "nixpkgs": "nixpkgs_2" } } }, diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix index 52ad7d581..a16b2e3a7 100644 --- a/vendor/github.com/spf13/viper/flake.nix +++ b/vendor/github.com/spf13/viper/flake.nix @@ -7,51 +7,55 @@ devenv.url = "github:cachix/devenv"; }; - outputs = inputs@{ flake-parts, ... }: + outputs = + inputs@{ flake-parts, ... }: flake-parts.lib.mkFlake { inherit inputs; } { imports = [ inputs.devenv.flakeModule ]; - systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.go_1_23; - }; - - pre-commit.hooks = { - nixpkgs-fmt.enable = true; - yamllint.enable = true; - }; - - packages = with pkgs; [ - gnumake - - golangci-lint - yamllint - ]; + systems = [ + "x86_64-linux" + "x86_64-darwin" + "aarch64-darwin" + ]; - scripts = { - versions.exec = '' - go version - golangci-lint version + perSystem = + { pkgs, ... 
}: + { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + git-hooks.hooks = { + nixpkgs-fmt.enable = true; + yamllint.enable = true; + }; + + packages = with pkgs; [ + gnumake + + golangci-lint + yamllint + ]; + + scripts = { + versions.exec = '' + go version + golangci-lint version + ''; + }; + + enterShell = '' + versions ''; - }; - - enterShell = '' - versions - ''; - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; }; - - ci = devenv.shells.default; }; - }; }; } diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go index 036879249..a7a839fd9 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -1,6 +1,6 @@ package yaml -import "gopkg.in/yaml.v3" +import "go.yaml.in/yaml/v3" // Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. type Codec struct{} diff --git a/vendor/github.com/spf13/viper/remote.go b/vendor/github.com/spf13/viper/remote.go index bdde7de26..46f26721d 100644 --- a/vendor/github.com/spf13/viper/remote.go +++ b/vendor/github.com/spf13/viper/remote.go @@ -219,7 +219,10 @@ func (v *Viper) watchKeyValueConfigOnChannel() error { for { b := <-rc reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) + err := v.unmarshalReader(reader, v.kvstore) + if err != nil { + v.logger.Error(fmt.Errorf("failed to unmarshal remote config: %w", err).Error()) + } } }(respc) return nil diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index 2a08074bc..d08ed4621 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -174,10 +174,7 @@ func parseSizeInBytes(sizeStr string) uint { } } - size := cast.ToInt(sizeStr) - if size < 0 { - size = 0 - } + size := max(cast.ToInt(sizeStr), 0) return safeMul(uint(size), multiplier) } diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index a58d757bd..34a94798b 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -376,7 +376,12 @@ func (v *Viper) WatchConfig() { } } }() - watcher.Add(configDir) + err = watcher.Add(configDir) + if err != nil { + v.logger.Error(fmt.Sprintf("failed to add watcher: %s", err)) + initWG.Done() + return + } initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on... eventsWG.Wait() // now, wait for event loop to end in this go-routine... 
}() @@ -1181,11 +1186,26 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) any { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return res + case "boolSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToBoolSlice(res) case "intSlice": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "uintSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToUintSlice(res) + case "float64Slice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToFloat64Slice(res) case "durationSlice": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") @@ -1268,11 +1288,26 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) any { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return res + case "boolSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToBoolSlice(res) case "intSlice": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "uintSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToUintSlice(res) + case "float64Slice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToFloat64Slice(res) case "stringToString": return stringToStringConv(flag.ValueString()) case "stringToInt": @@ -1670,7 +1705,10 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { } buf := new(bytes.Buffer) - buf.ReadFrom(in) + _, err := buf.ReadFrom(in) + if err != nil { + return fmt.Errorf("failed to read configuration from input: %w", err) + } // TODO: remove this once SupportedExts is deprecated/removed if !slices.Contains(SupportedExts, format) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index b25641c55..521daa25d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, http.NoBody) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. 
func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, http.NoBody) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index 6bd50d4c9..38fb79c03 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,9 +8,8 @@ import ( "net/http" "net/http/httptrace" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 937f9b4e7..fef83b42f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -8,13 +8,13 @@ import ( "time" "github.com/felixge/httpsnoop" - - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" ) // middleware is an http middleware which wraps the next handler in a span. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 7edc8d10f..821b80ec4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -10,14 +10,13 @@ import ( "context" "fmt" "net/http" - "os" "strings" "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/semconv/v1.34.0/httpconv" + "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" ) // OTelSemConvStabilityOptIn is an environment variable. @@ -33,14 +32,6 @@ type ResponseTelemetry struct { } type HTTPServer struct { - duplicate bool - - // Old metrics - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram - - // New metrics requestBodySizeHistogram httpconv.ServerRequestBodySize responseBodySizeHistogram httpconv.ServerResponseBodySize requestDurationHistogram httpconv.ServerRequestDuration @@ -63,20 +54,10 @@ type HTTPServer struct { // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. 
func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { - attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) - if s.duplicate { - return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs) - } - return attrs + return CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) } func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { - if s.duplicate { - return []attribute.KeyValue{ - OldHTTPServer{}.NetworkTransportAttr(network), - CurrentHTTPServer{}.NetworkTransportAttr(network), - } - } return []attribute.KeyValue{ CurrentHTTPServer{}.NetworkTransportAttr(network), } @@ -86,11 +67,7 @@ func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp) - if s.duplicate { - return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs) - } - return attrs + return CurrentHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. @@ -134,13 +111,13 @@ type MetricData struct { var ( metricAddOptionPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &[]metric.AddOption{} }, } metricRecordOptionPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &[]metric.RecordOption{} }, } @@ -156,18 +133,6 @@ func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { s.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o) *recordOpts = (*recordOpts)[:0] metricRecordOptionPool.Put(recordOpts) - - if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { - attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) - *addOpts = append(*addOpts, o) - s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) 
- s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) - *addOpts = (*addOpts)[:0] - metricAddOptionPool.Put(addOpts) - } } // hasOptIn returns true if the comma-separated version string contains the @@ -182,11 +147,7 @@ func hasOptIn(version, optIn string) bool { } func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) - duplicate := hasOptIn(env, "http/dup") - server := HTTPServer{ - duplicate: duplicate, - } + server := HTTPServer{} var err error server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter) @@ -203,32 +164,16 @@ func NewHTTPServer(meter metric.Meter) HTTPServer { ), ) handleErr(err) - - if duplicate { - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) - } return server } type HTTPClient struct { - duplicate bool - - // old metrics - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram - - // new metrics requestBodySize httpconv.ClientRequestBodySize requestDuration httpconv.ClientRequestDuration } func NewHTTPClient(meter metric.Meter) HTTPClient { - env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) - duplicate := hasOptIn(env, "http/dup") - client := HTTPClient{ - duplicate: duplicate, - } + client := HTTPClient{} var err error client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter) @@ -240,29 +185,17 @@ func NewHTTPClient(meter metric.Meter) HTTPClient { ) handleErr(err) - if duplicate { - client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) - } - return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - attrs := CurrentHTTPClient{}.RequestTraceAttrs(req) - if c.duplicate { - return OldHTTPClient{}.RequestTraceAttrs(req, attrs) - } - return attrs + return CurrentHTTPClient{}.RequestTraceAttrs(req) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. 
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp) - if c.duplicate { - return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs) - } - return attrs + return CurrentHTTPClient{}.ResponseTraceAttrs(resp) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -302,42 +235,14 @@ func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { addOptions: set, } - if c.duplicate { - attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - opts["old"] = MetricOpts{ - measurement: set, - addOptions: set, - } - } - return opts } func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { s.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) s.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) - - if s.duplicate { - s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) - } -} - -func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { - if s.responseBytesCounter == nil { - // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). - return - } - - s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) } func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { - attrs := CurrentHTTPClient{}.TraceAttributes(host) - if s.duplicate { - return OldHTTPClient{}.TraceAttributes(host, attrs) - } - - return attrs + return CurrentHTTPClient{}.TraceAttributes(host) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go index b4036dd90..1bb207b80 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -13,4 +13,3 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ //go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconvtest_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconvtest_test.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 9766f3ac8..28c51a3b3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ 
b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -17,7 +17,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.34.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0" ) type RequestTraceAttrsOpts struct { @@ -194,7 +194,7 @@ func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute. return semconvNew.HTTPRequestMethodGet, orig } -func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter if https { return semconvNew.URLScheme("https") } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e4ebf8581..96422ad1e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -14,7 +14,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.34.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0" ) // SplitHostPort splits a network address hostport of the form "host", @@ -53,10 +53,10 @@ func SplitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) // nolint: gosec // Byte size checked 16 above. + return host, int(p) //nolint:gosec // Byte size checked 16 above. } -func requiredHTTPPort(https bool, port int) int { // nolint:revive +func requiredHTTPPort(https bool, port int) int { //nolint:revive // ignore linter if https { if port > 0 && port != 443 { return port diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go deleted file mode 100644 index ba7fccf1e..000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ /dev/null @@ -1,273 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconv/v120.0.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "errors" - "io" - "net/http" - "slices" - - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" -) - -type OldHTTPServer struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. 
It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs) -} - -func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { - return semconvutil.NetTransport(network) -} - -// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. -// -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue { - if resp.ReadBytes > 0 { - attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) - } - if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { - // This is not in the semantic conventions, but is historically provided - attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) - } - if resp.WriteBytes > 0 { - attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) - } - if resp.StatusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) - } - if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { - // This is not in the semantic conventions, but is historically provided - attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) - } - - return attributes -} - -// Route returns the attribute for the route. -func (o OldHTTPServer) Route(route string) attribute.KeyValue { - return semconv.HTTPRoute(route) -} - -// HTTPStatusCode returns the attribute for the HTTP status code. -// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. -func HTTPStatusCode(status int) attribute.KeyValue { - return semconv.HTTPStatusCode(status) -} - -// Server HTTP metrics. 
-const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - -func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { - if meter == nil { - return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} - } - var err error - requestBytesCounter, err := meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - responseBytesCounter, err := meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - serverLatencyMeasure, err := meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) - - return requestBytesCounter, responseBytesCounter, serverLatencyMeasure -} - -func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - n := len(additionalAttributes) + 3 - var host string - var p int - if server == "" { - host, p = SplitHostPort(req.Host) - } else { - // Prioritize the primary server name. - host, p = SplitHostPort(server) - if p < 0 { - _, p = SplitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - n++ - } - if protoVersion != "" { - n++ - } - - if statusCode > 0 { - n++ - } - - attributes := slices.Grow(additionalAttributes, n) - attributes = append(attributes, - semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), - o.scheme(req.TLS != nil), - semconv.NetHostName(host)) - - if hostPort > 0 { - attributes = append(attributes, semconv.NetHostPort(hostPort)) - } - if protoName != "" { - attributes = append(attributes, semconv.NetProtocolName(protoName)) - } - if protoVersion != "" { - attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) - } - - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - return attributes -} - -func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { - return semconv.HTTPSchemeHTTPS - } - return semconv.HTTPSchemeHTTP -} - -type OldHTTPClient struct{} - -func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - return semconvutil.HTTPClientRequest(req, attrs) -} - -func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { - return semconvutil.HTTPClientResponse(resp, attrs) -} - -func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - http.status_code int - net.peer.name string - net.peer.port int - */ - - n := 2 // method, peer name. 
- var h string - if req.URL != nil { - h = req.URL.Host - } - var requestHost string - var requestPort int - for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = SplitHostPort(hostport) - if requestHost != "" || requestPort > 0 { - break - } - } - - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) - if port > 0 { - n++ - } - - if statusCode > 0 { - n++ - } - - attributes := slices.Grow(additionalAttributes, n) - attributes = append(attributes, - semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), - semconv.NetPeerName(requestHost), - ) - - if port > 0 { - attributes = append(attributes, semconv.NetPeerPort(port)) - } - - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - return attributes -} - -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Incoming request bytes total - clientResponseSize = "http.client.response.size" // Incoming response bytes total - clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds -) - -func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { - if meter == nil { - return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} - } - requestBytesCounter, err := meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - responseBytesCounter, err := meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - latencyMeasure, err := meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) - - return requestBytesCounter, responseBytesCounter, latencyMeasure -} - -// TraceAttributes returns attributes for httptrace. 
-func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue { - return append(attrs, semconv.NetHostName(host)) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go deleted file mode 100644 index 7aa5f99e8..000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -// Generate semconvutil package: -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go deleted file mode 100644 index b99735479..000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ /dev/null @@ -1,594 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconvutil/httpconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconvutil provides OpenTelemetry semantic convention utilities. -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -import ( - "fmt" - "net/http" - "slices" - "strings" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" -) - -type HTTPServerRequestOptions struct { - // If set, this is used as value for the "http.client_ip" attribute. - HTTPClientIP string -} - -// HTTPClientResponse returns trace attributes for an HTTP response received by a -// client from a server. It will return the following attributes if the related -// values are defined in resp: "http.status.code", -// "http.response_content_length". -// -// This does not add all OpenTelemetry required attributes for an HTTP event, -// it assumes ClientRequest was used to create the span with a complete set of -// attributes. If a complete set of attributes can be generated using the -// request contained in resp. For example: -// -// HTTPClientResponse(resp, ClientRequest(resp.Request))) -func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { - return hc.ClientResponse(resp, attrs) -} - -// HTTPClientRequest returns trace attributes for an HTTP request made by a client. -// The following attributes are always returned: "http.url", "http.method", -// "net.peer.name". The following attributes are returned if the related values -// are defined in req: "net.peer.port", "user_agent.original", -// "http.request_content_length". 
-func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - return hc.ClientRequest(req, attrs) -} - -// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client. -// The following attributes are always returned: "http.method", "net.peer.name". -// The following attributes are returned if the -// related values are defined in req: "net.peer.port". -func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue { - return hc.ClientRequestMetrics(req) -} - -// HTTPClientStatus returns a span status code and message for an HTTP status code -// value received by a client. -func HTTPClientStatus(code int) (codes.Code, string) { - return hc.ClientStatus(code) -} - -// HTTPServerRequest returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.target", "net.host.name". The following attributes are returned if -// they related values are defined in req: "net.host.port", "net.sock.peer.addr", -// "net.sock.peer.port", "user_agent.original", "http.client_ip". -func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { - return hc.ServerRequest(server, req, opts, attrs) -} - -// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "net.host.name". The following attributes are returned if they related -// values are defined in req: "net.host.port". -func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - return hc.ServerRequestMetrics(server, req) -} - -// HTTPServerStatus returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. 
-func HTTPServerStatus(code int) (codes.Code, string) { - return hc.ServerStatus(code) -} - -// httpConv are the HTTP semantic convention attributes defined for a version -// of the OpenTelemetry specification. -type httpConv struct { - NetConv *netConv - - HTTPClientIPKey attribute.Key - HTTPMethodKey attribute.Key - HTTPRequestContentLengthKey attribute.Key - HTTPResponseContentLengthKey attribute.Key - HTTPRouteKey attribute.Key - HTTPSchemeHTTP attribute.KeyValue - HTTPSchemeHTTPS attribute.KeyValue - HTTPStatusCodeKey attribute.Key - HTTPTargetKey attribute.Key - HTTPURLKey attribute.Key - UserAgentOriginalKey attribute.Key -} - -var hc = &httpConv{ - NetConv: nc, - - HTTPClientIPKey: semconv.HTTPClientIPKey, - HTTPMethodKey: semconv.HTTPMethodKey, - HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, - HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, - HTTPRouteKey: semconv.HTTPRouteKey, - HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, - HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, - HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, - HTTPTargetKey: semconv.HTTPTargetKey, - HTTPURLKey: semconv.HTTPURLKey, - UserAgentOriginalKey: semconv.UserAgentOriginalKey, -} - -// ClientResponse returns attributes for an HTTP response received by a client -// from a server. The following attributes are returned if the related values -// are defined in resp: "http.status.code", "http.response_content_length". -// -// This does not add all OpenTelemetry required attributes for an HTTP event, -// it assumes ClientRequest was used to create the span with a complete set of -// attributes. If a complete set of attributes can be generated using the -// request contained in resp. For example: -// -// ClientResponse(resp, ClientRequest(resp.Request)) -func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.status_code int - http.response_content_length int - */ - var n int - if resp.StatusCode > 0 { - n++ - } - if resp.ContentLength > 0 { - n++ - } - if n == 0 { - return attrs - } - - attrs = slices.Grow(attrs, n) - if resp.StatusCode > 0 { - attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) - } - if resp.ContentLength > 0 { - attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) - } - return attrs -} - -// ClientRequest returns attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.url", "http.method", -// "net.peer.name". The following attributes are returned if the related values -// are defined in req: "net.peer.port", "user_agent.original", -// "http.request_content_length", "user_agent.original". -func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - user_agent.original string - http.url string - net.peer.name string - net.peer.port int - http.request_content_length int - */ - - /* The following semantic conventions are not returned: - http.status_code This requires the response. See ClientResponse. - http.response_content_length This requires the response. See ClientResponse. - net.sock.family This requires the socket used. - net.sock.peer.addr This requires the socket used. - net.sock.peer.name This requires the socket used. - net.sock.peer.port This requires the socket used. 
- http.resend_count This is something outside of a single request. - net.protocol.name The value is the Request is ignored, and the go client will always use "http". - net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0. - */ - n := 3 // URL, peer name, proto, and method. - var h string - if req.URL != nil { - h = req.URL.Host - } - peer, p := firstHostPort(h, req.Header.Get("Host")) - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) - if port > 0 { - n++ - } - useragent := req.UserAgent() - if useragent != "" { - n++ - } - if req.ContentLength > 0 { - n++ - } - - attrs = slices.Grow(attrs, n) - attrs = append(attrs, c.method(req.Method)) - - var u string - if req.URL != nil { - // Remove any username/password info that may be in the URL. - userinfo := req.URL.User - req.URL.User = nil - u = req.URL.String() - // Restore any username/password info that was removed. - req.URL.User = userinfo - } - attrs = append(attrs, c.HTTPURLKey.String(u)) - - attrs = append(attrs, c.NetConv.PeerName(peer)) - if port > 0 { - attrs = append(attrs, c.NetConv.PeerPort(port)) - } - - if useragent != "" { - attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) - } - - if l := req.ContentLength; l > 0 { - attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) - } - - return attrs -} - -// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.method", "net.peer.name". -// The following attributes are returned if the related values -// are defined in req: "net.peer.port". -func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - net.peer.name string - net.peer.port int - */ - - n := 2 // method, peer name. - var h string - if req.URL != nil { - h = req.URL.Host - } - peer, p := firstHostPort(h, req.Header.Get("Host")) - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) - if port > 0 { - n++ - } - - attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer)) - - if port > 0 { - attrs = append(attrs, c.NetConv.PeerPort(port)) - } - - return attrs -} - -// ServerRequest returns attributes for an HTTP request received by a server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.target", "net.host.name". 
The following attributes are returned if they -// related values are defined in req: "net.host.port", "net.sock.peer.addr", -// "net.sock.peer.port", "user_agent.original", "http.client_ip", -// "net.protocol.name", "net.protocol.version". -func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - http.scheme string - net.host.name string - net.host.port int - net.sock.peer.addr string - net.sock.peer.port int - user_agent.original string - http.client_ip string - net.protocol.name string Note: not set if the value is "http". - net.protocol.version string - http.target string Note: doesn't include the query parameter. - */ - - /* The following semantic conventions are not returned: - http.status_code This requires the response. - http.request_content_length This requires the len() of body, which can mutate it. - http.response_content_length This requires the response. - http.route This is not available. - net.sock.peer.name This would require a DNS lookup. - net.sock.host.addr The request doesn't have access to the underlying socket. - net.sock.host.port The request doesn't have access to the underlying socket. - - */ - n := 4 // Method, scheme, proto, and host name. - var host string - var p int - if server == "" { - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. - host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - peer, peerPort := splitHostPort(req.RemoteAddr) - if peer != "" { - n++ - if peerPort > 0 { - n++ - } - } - useragent := req.UserAgent() - if useragent != "" { - n++ - } - - // For client IP, use, in order: - // 1. The value passed in the options - // 2. The value in the X-Forwarded-For header - // 3. The peer address - clientIP := opts.HTTPClientIP - if clientIP == "" { - clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) - if clientIP == "" { - clientIP = peer - } - } - if clientIP != "" { - n++ - } - - var target string - if req.URL != nil { - target = req.URL.Path - if target != "" { - n++ - } - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - n++ - } - if protoVersion != "" { - n++ - } - - attrs = slices.Grow(attrs, n) - - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.NetConv.HostName(host)) - - if hostPort > 0 { - attrs = append(attrs, c.NetConv.HostPort(hostPort)) - } - - if peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. 
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) - if peerPort > 0 { - attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) - } - } - - if useragent != "" { - attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) - } - - if clientIP != "" { - attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) - } - - if target != "" { - attrs = append(attrs, c.HTTPTargetKey.String(target)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) - } - - return attrs -} - -// ServerRequestMetrics returns metric attributes for an HTTP request received -// by a server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "net.host.name". The following attributes are returned if they related -// values are defined in req: "net.host.port". -func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.scheme string - http.route string - http.method string - http.status_code int - net.host.name string - net.host.port int - net.protocol.name string Note: not set if the value is "http". - net.protocol.version string - */ - - n := 3 // Method, scheme, and host name. - var host string - var p int - if server == "" { - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - n++ - } - if protoVersion != "" { - n++ - } - - attrs := make([]attribute.KeyValue, 0, n) - - attrs = append(attrs, c.methodMetric(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.NetConv.HostName(host)) - - if hostPort > 0 { - attrs = append(attrs, c.NetConv.HostPort(hostPort)) - } - if protoName != "" { - attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) - } - - return attrs -} - -func (c *httpConv) method(method string) attribute.KeyValue { - if method == "" { - return c.HTTPMethodKey.String(http.MethodGet) - } - return c.HTTPMethodKey.String(method) -} - -func (c *httpConv) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return c.HTTPMethodKey.String(method) -} - -func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { - return c.HTTPSchemeHTTPS - } - return c.HTTPSchemeHTTP -} - -func serverClientIP(xForwardedFor string) string { - if idx := strings.Index(xForwardedFor, ","); idx >= 0 { - xForwardedFor = xForwardedFor[:idx] - } - return xForwardedFor -} - -func requiredHTTPPort(https bool, port int) int { // nolint:revive - if https { - if port > 0 && port != 443 { - return port - } - } else { - if port > 0 && port != 80 { - return port - } - } - return -1 -} - -// Return the request host and port from the first non-empty source. -func firstHostPort(source ...string) (host string, port int) { - for _, hostport := range source { - host, port = splitHostPort(hostport) - if host != "" || port > 0 { - break - } - } - return -} - -// ClientStatus returns a span status code and message for an HTTP status code -// value received by a client. -func (c *httpConv) ClientStatus(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 400 { - return codes.Error, "" - } - return codes.Unset, "" -} - -// ServerStatus returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. -func (c *httpConv) ServerStatus(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 500 { - return codes.Error, "" - } - return codes.Unset, "" -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go deleted file mode 100644 index df97255e4..000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ /dev/null @@ -1,214 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. 
-// source: internal/shared/semconvutil/netconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -import ( - "net" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" -) - -// NetTransport returns a trace attribute describing the transport protocol of the -// passed network. See the net.Dial for information about acceptable network -// values. -func NetTransport(network string) attribute.KeyValue { - return nc.Transport(network) -} - -// netConv are the network semantic convention attributes defined for a version -// of the OpenTelemetry specification. -type netConv struct { - NetHostNameKey attribute.Key - NetHostPortKey attribute.Key - NetPeerNameKey attribute.Key - NetPeerPortKey attribute.Key - NetProtocolName attribute.Key - NetProtocolVersion attribute.Key - NetSockFamilyKey attribute.Key - NetSockPeerAddrKey attribute.Key - NetSockPeerPortKey attribute.Key - NetSockHostAddrKey attribute.Key - NetSockHostPortKey attribute.Key - NetTransportOther attribute.KeyValue - NetTransportTCP attribute.KeyValue - NetTransportUDP attribute.KeyValue - NetTransportInProc attribute.KeyValue -} - -var nc = &netConv{ - NetHostNameKey: semconv.NetHostNameKey, - NetHostPortKey: semconv.NetHostPortKey, - NetPeerNameKey: semconv.NetPeerNameKey, - NetPeerPortKey: semconv.NetPeerPortKey, - NetProtocolName: semconv.NetProtocolNameKey, - NetProtocolVersion: semconv.NetProtocolVersionKey, - NetSockFamilyKey: semconv.NetSockFamilyKey, - NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, - NetSockPeerPortKey: semconv.NetSockPeerPortKey, - NetSockHostAddrKey: semconv.NetSockHostAddrKey, - NetSockHostPortKey: semconv.NetSockHostPortKey, - NetTransportOther: semconv.NetTransportOther, - NetTransportTCP: semconv.NetTransportTCP, - NetTransportUDP: semconv.NetTransportUDP, - NetTransportInProc: semconv.NetTransportInProc, -} - -func (c *netConv) Transport(network string) attribute.KeyValue { - switch network { - case "tcp", "tcp4", "tcp6": - return c.NetTransportTCP - case "udp", "udp4", "udp6": - return c.NetTransportUDP - case "unix", "unixgram", "unixpacket": - return c.NetTransportInProc - default: - // "ip:*", "ip4:*", and "ip6:*" all are considered other. - return c.NetTransportOther - } -} - -// Host returns attributes for a network host address. -func (c *netConv) Host(address string) []attribute.KeyValue { - h, p := splitHostPort(address) - var n int - if h != "" { - n++ - if p > 0 { - n++ - } - } - - if n == 0 { - return nil - } - - attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.HostName(h)) - if p > 0 { - attrs = append(attrs, c.HostPort(p)) - } - return attrs -} - -func (c *netConv) HostName(name string) attribute.KeyValue { - return c.NetHostNameKey.String(name) -} - -func (c *netConv) HostPort(port int) attribute.KeyValue { - return c.NetHostPortKey.Int(port) -} - -func family(network, address string) string { - switch network { - case "unix", "unixgram", "unixpacket": - return "unix" - default: - if ip := net.ParseIP(address); ip != nil { - if ip.To4() == nil { - return "inet6" - } - return "inet" - } - } - return "" -} - -// Peer returns attributes for a network peer address. 
-func (c *netConv) Peer(address string) []attribute.KeyValue { - h, p := splitHostPort(address) - var n int - if h != "" { - n++ - if p > 0 { - n++ - } - } - - if n == 0 { - return nil - } - - attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.PeerName(h)) - if p > 0 { - attrs = append(attrs, c.PeerPort(p)) - } - return attrs -} - -func (c *netConv) PeerName(name string) attribute.KeyValue { - return c.NetPeerNameKey.String(name) -} - -func (c *netConv) PeerPort(port int) attribute.KeyValue { - return c.NetPeerPortKey.Int(port) -} - -func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { - return c.NetSockPeerAddrKey.String(addr) -} - -func (c *netConv) SockPeerPort(port int) attribute.KeyValue { - return c.NetSockPeerPortKey.Int(port) -} - -// splitHostPort splits a network address hostport of the form "host", -// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", -// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and -// port. -// -// An empty host is returned if it is not provided or unparsable. A negative -// port is returned if it is not provided or unparsable. -func splitHostPort(hostport string) (host string, port int) { - port = -1 - - if strings.HasPrefix(hostport, "[") { - addrEnd := strings.LastIndex(hostport, "]") - if addrEnd < 0 { - // Invalid hostport. - return - } - if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { - host = hostport[1:addrEnd] - return - } - } else { - if i := strings.LastIndex(hostport, ":"); i < 0 { - host = hostport - return - } - } - - host, pStr, err := net.SplitHostPort(hostport) - if err != nil { - return - } - - p, err := strconv.ParseUint(pStr, 10, 16) - if err != nil { - return - } - return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. 
-} - -func netProtocol(proto string) (name string, version string) { - name, version, _ = strings.Cut(proto, "/") - switch name { - case "HTTP": - name = "http" - case "QUIC": - name = "quic" - case "SPDY": - name = "spdy" - default: - name = strings.ToLower(name) - } - return name, version -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index e4c02a429..514ae6753 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,14 +11,14 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" ) // Transport implements the http.RoundTripper interface and wraps @@ -148,11 +148,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } if err == nil { - // For handling response bytes we leverage a callback when the client reads the http response - readRecordFunc := func(n int64) { - t.semconv.RecordResponseSize(ctx, n, metricOpts) - } - + readRecordFunc := func(int64) {} res.Body = newWrappedBody(span, readRecordFunc, res.Body) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 2fe5a1361..dfb53cf1f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,6 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.62.0" + return "0.63.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 6bf3abc41..2b53a25e1 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -7,3 +7,4 @@ ans nam valu thirdparty +addOpt diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 5f69cc027..b01762ffc 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -10,6 +10,7 @@ linters: - depguard - errcheck - errorlint + - gocritic - godot - gosec - govet @@ -86,6 +87,18 @@ linters: deny: - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal desc: Do not use cross-module internal packages. + gocritic: + disabled-checks: + - appendAssign + - commentedOutCode + - dupArg + - hugeParam + - importShadow + - preferDecodeRune + - rangeValCopy + - unnamedResult + - whyNoLint + enable-all: true godot: exclude: # Exclude links. 
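For context on the `otelhttp/transport.go` hunk above: the read callback passed to `newWrappedBody` is now a no-op, so the transport no longer records response sizes as the body is read. Nothing changes for callers; below is a minimal sketch of wrapping an HTTP client with this instrumentation (the request URL is a placeholder, not part of the diff):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Wrap the default transport; outgoing requests get spans and
	// propagated trace context automatically.
	client := http.Client{
		Transport: otelhttp.NewTransport(http.DefaultTransport),
	}

	// Placeholder URL, for illustration only.
	req, err := http.NewRequestWithContext(
		context.Background(), http.MethodGet, "https://example.com", http.NoBody)
	if err != nil {
		panic(err)
	}

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```

The wrapped client behaves identically before and after this diff; only the internal response-size bookkeeping shown in the hunk is dropped.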
@@ -167,7 +180,10 @@ linters: - fmt.Print - fmt.Printf - fmt.Println + - name: unused-parameter + - name: unused-receiver - name: unnecessary-stmt + - name: use-any - name: useless-break - name: var-declaration - name: var-naming @@ -224,10 +240,6 @@ linters: - linters: - gosec text: 'G402: TLS MinVersion too low.' - paths: - - third_party$ - - builtin$ - - examples$ issues: max-issues-per-linter: 0 max-same-issues: 0 @@ -237,14 +249,12 @@ formatters: - goimports - golines settings: + gofumpt: + extra-rules: true goimports: local-prefixes: - - go.opentelemetry.io + - go.opentelemetry.io/otel golines: max-len: 120 exclusions: generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 40d62fa2e..532850588 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -2,5 +2,8 @@ http://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects +# Weaver model URL for semantic-conventions repository. +https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual +http://4.3.2.1:78/user/123 \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 4acc75701..f3abcfdc2 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,93 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 + +This release is the last to support [Go 1.23]. +The next release will require at least [Go 1.24]. + +### Added + +- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772) +- Add template attribute functions to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6939) + - `ContainerLabel` + - `DBOperationParameter` + - `DBSystemParameter` + - `HTTPRequestHeader` + - `HTTPResponseHeader` + - `K8SCronJobAnnotation` + - `K8SCronJobLabel` + - `K8SDaemonSetAnnotation` + - `K8SDaemonSetLabel` + - `K8SDeploymentAnnotation` + - `K8SDeploymentLabel` + - `K8SJobAnnotation` + - `K8SJobLabel` + - `K8SNamespaceAnnotation` + - `K8SNamespaceLabel` + - `K8SNodeAnnotation` + - `K8SNodeLabel` + - `K8SPodAnnotation` + - `K8SPodLabel` + - `K8SReplicaSetAnnotation` + - `K8SReplicaSetLabel` + - `K8SStatefulSetAnnotation` + - `K8SStatefulSetLabel` + - `ProcessEnvironmentVariable` + - `RPCConnectRPCRequestMetadata` + - `RPCConnectRPCResponseMetadata` + - `RPCGRPCRequestMetadata` + - `RPCGRPCResponseMetadata` +- Add `ErrorType` attribute helper function to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6962) +- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968) +- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179) +- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. 
(#7001)
+- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`.
+  Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209)
+- The `go.opentelemetry.io/otel/semconv/v1.36.0` package.
+  The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0`. (#7032, #7041)
+- Add support for configuring Prometheus name translation using the `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111)
+- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`.
+  Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121)
+- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`.
+  Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133)
+- Support testing of [Go 1.25]. (#7187)
+- The `go.opentelemetry.io/otel/semconv/v1.37.0` package.
+  The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7254)
+
+### Changed
+
+- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791)
+- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908)
+- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. (#7094)
+
+### Fixed
+
+- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002)
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it is already present in the metric name. (#7088)
+- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195)
+- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199)
+
+### Deprecated
+
+- Deprecate the `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111)
+- Deprecate support for the `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use the `WithCardinalityLimit` option instead. (#7166)
+
+## [0.59.1] 2025-07-21
+
+### Changed
+
+- Retract the `v0.59.0` release of the `go.opentelemetry.io/otel/exporters/prometheus` module, which appends incorrect unit suffixes. (#7046)
+- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF-8 is enabled.
+ It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044) + +### Fixed + +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets. + E.g. `{spans}`. (#7044) + ## [1.37.0/0.59.0/0.13.0] 2025-06-25 ### Added @@ -3343,7 +3430,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD +[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 [1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 [0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 [0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 @@ -3439,6 +3527,7 @@ It contains api and sdk for trace and meter. +[Go 1.25]: https://go.dev/doc/go1.25 [Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 945a07d2b..26a03aed1 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @pellared @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125 CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index f9ddc281f..0b3ae855c 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -192,6 +192,35 @@ should have `go test -bench` output in their description. should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) output in their description. +## Dependencies + +This project uses [Go Modules] for dependency management. All modules will use +`go.mod` to explicitly list all direct and indirect dependencies, ensuring a +clear dependency graph. The `go.sum` file for each module will be committed to +the repository and used to verify the integrity of downloaded modules, +preventing malicious tampering. + +This project uses automated dependency update tools (i.e. dependabot, +renovatebot) to manage updates to dependencies. This ensures that dependencies +are kept up-to-date with the latest security patches and features and are +reviewed before being merged. If you would like to propose a change to a +dependency it should be done through a pull request that updates the `go.mod` +file and includes a description of the change. + +See the [versioning and compatibility](./VERSIONING.md) policy for more details +about dependency compatibility. + +[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more + +### Environment Dependencies + +This project does not partition dependencies based on the environment (i.e. +`development`, `staging`, `production`). 
+
+Only the dependencies explicitly included in the released modules have been
+tested and verified to work with the released code. No other guarantee is made
+about the compatibility of other dependencies.
+
 ## Documentation
 
 Each (non-internal, non-test) package must be documented using
@@ -233,6 +262,10 @@ For a non-comprehensive but foundational overview of these best practices
 the [Effective Go](https://golang.org/doc/effective_go.html) documentation
 is an excellent starting place.
 
+We also recommend following the
+[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) page,
+which collects common comments made during reviews of Go code.
+
 As a convenience for developers building this project the `make precommit`
 will format, lint, validate, and in some cases fix the changes you plan to
 submit. This check will need to pass for your changes to be able to be
@@ -586,6 +619,10 @@ See also:
 
 ### Testing
 
+We allow using [`testify`](https://github.com/stretchr/testify) even though
+it is seen as non-idiomatic according to
+the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page.
+
 The tests should never leak goroutines.
 
 Use the term `ConcurrentSafe` in the test name when it aims to verify the
@@ -640,13 +677,6 @@ should be canceled.
 
 ## Approvers and Maintainers
 
-### Triagers
-
-- [Alex Kats](https://github.com/akats7), Capital One
-- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
-
-### Approvers
-
 ### Maintainers
 
 - [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
@@ -655,6 +685,21 @@ should be canceled.
 - [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
 - [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
 
+For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
+
+### Approvers
+
+- [Flc](https://github.com/flc1125), Independent
+
+For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
+
+### Triagers
+
+- [Alex Kats](https://github.com/akats7), Capital One
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
+
+For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
+
 ### Emeritus
 
 - [Aaron Clawson](https://github.com/MadVikingGod)
@@ -665,6 +710,8 @@ should be canceled.
 - [Josh MacDonald](https://github.com/jmacd)
 - [Liz Fong-Jones](https://github.com/lizthegrey)
 
+For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
+ ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/LICENSE +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 4fa423ca0..bc0f1f92d 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -34,9 +34,6 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) MULTIMOD = $(TOOLS)/multimod $(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod -SEMCONVGEN = $(TOOLS)/semconvgen -$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen - CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink @@ -71,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -284,7 +281,7 @@ semconv-generate: $(SEMCONVKIT) docker run --rm \ -u $(DOCKER_USER) \ --env HOME=/tmp/weaver \ - --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ $(WEAVER_IMAGE) registry generate \ diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5fa1b75c6..6b7ab5f21 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -53,18 +53,25 @@ Currently, this project supports the following environments. 
| OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.25 | amd64 | | Ubuntu | 1.24 | amd64 | | Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.25 | 386 | | Ubuntu | 1.24 | 386 | | Ubuntu | 1.23 | 386 | +| Ubuntu | 1.25 | arm64 | | Ubuntu | 1.24 | arm64 | | Ubuntu | 1.23 | arm64 | +| macOS 13 | 1.25 | amd64 | | macOS 13 | 1.24 | amd64 | | macOS 13 | 1.23 | amd64 | +| macOS | 1.25 | arm64 | | macOS | 1.24 | arm64 | | macOS | 1.23 | arm64 | +| Windows | 1.25 | amd64 | | Windows | 1.24 | amd64 | | Windows | 1.23 | amd64 | +| Windows | 1.25 | 386 | | Windows | 1.24 | 386 | | Windows | 1.23 | 386 | diff --git a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml new file mode 100644 index 000000000..8041fc62e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml @@ -0,0 +1,203 @@ +header: + schema-version: "1.0.0" + expiration-date: "2026-08-04T00:00:00.000Z" + last-updated: "2025-08-04" + last-reviewed: "2025-08-04" + commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8 + project-url: https://github.com/open-telemetry/opentelemetry-go + project-release: "v1.37.0" + changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md + license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE + +project-lifecycle: + status: active + bug-fixes-only: false + core-maintainers: + - https://github.com/dmathieu + - https://github.com/dashpole + - https://github.com/pellared + - https://github.com/XSAM + - https://github.com/MrAlias + release-process: | + See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md + +contribution-policy: + accepts-pull-requests: true + accepts-automated-pull-requests: true + automated-tools-list: + - automated-tool: dependabot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: renovatebot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: opentelemetrybot + action: allowed + comment: Automated OpenTelemetry actions are accepted. 
+ contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md + +documentation: + - https://pkg.go.dev/go.opentelemetry.io/otel + - https://opentelemetry.io/docs/instrumentation/go/ + +distribution-points: + - pkg:golang/go.opentelemetry.io/otel + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test + - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin + - pkg:golang/go.opentelemetry.io/otel/metric + - pkg:golang/go.opentelemetry.io/otel/sdk + - pkg:golang/go.opentelemetry.io/otel/sdk/metric + - pkg:golang/go.opentelemetry.io/otel/trace + - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus + - pkg:golang/go.opentelemetry.io/otel/log + - pkg:golang/go.opentelemetry.io/otel/log/logtest + - pkg:golang/go.opentelemetry.io/otel/sdk/log + - pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog + - pkg:golang/go.opentelemetry.io/otel/schema + +security-artifacts: + threat-model: + threat-model-created: false + comment: | + No formal threat model created yet. + self-assessment: + self-assessment-created: false + comment: | + No formal self-assessment yet. + +security-testing: + - tool-type: sca + tool-name: Dependabot + tool-version: latest + tool-url: https://github.com/dependabot + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Automated dependency updates. + - tool-type: sast + tool-name: golangci-lint + tool-version: latest + tool-url: https://github.com/golangci/golangci-lint + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Static analysis in CI. + - tool-type: fuzzing + tool-name: OSS-Fuzz + tool-version: latest + tool-url: https://github.com/google/oss-fuzz + tool-rulesets: + - default + integration: + ad-hoc: false + ci: false + before-release: false + comment: | + OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details. 
+ - tool-type: sast + tool-name: CodeQL + tool-version: latest + tool-url: https://github.com/github/codeql + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details. + - tool-type: sca + tool-name: govulncheck + tool-version: latest + tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration. + +security-assessments: + - auditor-name: 7ASecurity + auditor-url: https://7asecurity.com + auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf + report-year: 2023 + comment: | + This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository. + +security-contacts: + - type: email + value: cncf-opentelemetry-security@lists.cncf.io + primary: true + - type: website + value: https://github.com/open-telemetry/opentelemetry-go/security/policy + primary: false + +vulnerability-reporting: + accepts-vulnerability-reports: true + email-contact: cncf-opentelemetry-security@lists.cncf.io + security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy + comment: | + Security issues should be reported via email or GitHub security policy page. 
+ +dependencies: + third-party-packages: true + dependencies-lists: + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod + dependencies-lifecycle: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + Dependency lifecycle managed via go.mod and renovatebot. 
+ env-dependencies-policy: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + See contributing policy for environment usage. diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 318e42fca..6333d34b3 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -78,7 +78,7 @@ func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return &bytes.Buffer{} }, }, @@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string { for iter.Next() { i, keyValue := iter.IndexedAttribute() if i > 0 { - _, _ = buf.WriteRune(',') + _ = buf.WriteByte(',') } copyAndEscape(buf, string(keyValue.Key)) - _, _ = buf.WriteRune('=') + _ = buf.WriteByte('=') if keyValue.Value.Type() == STRING { copyAndEscape(buf, keyValue.Value.AsString()) @@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: - _, _ = buf.WriteRune(escapeChar) + _ = buf.WriteByte(escapeChar) } _, _ = buf.WriteRune(ch) } } -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. +// Valid reports whether this encoder ID was allocated by +// [NewEncoderID]. Invalid encoder IDs will not be cached. func (id EncoderID) Valid() bool { return id.value != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index 3eeaa5d44..624ebbe38 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -15,8 +15,8 @@ type Filter func(KeyValue) bool // // If keys is empty a deny-all filter is returned. func NewAllowKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return false } + if len(keys) == 0 { + return func(KeyValue) bool { return false } } allowed := make(map[Key]struct{}, len(keys)) @@ -34,8 +34,8 @@ func NewAllowKeysFilter(keys ...Key) Filter { // // If keys is empty an allow-all filter is returned. func NewDenyKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return true } + if len(keys) == 0 { + return func(KeyValue) bool { return true } } forbid := make(map[Key]struct{}, len(keys)) diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index b76d2bbfd..087550430 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -12,7 +12,7 @@ import ( ) // BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) interface{} { +func BoolSliceValue(v []bool) any { var zero bool cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} { } // Int64SliceValue converts an int64 slice into an array with same elements as slice. 
-func Int64SliceValue(v []int64) interface{} { +func Int64SliceValue(v []int64) any { var zero int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} { } // Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) interface{} { +func Float64SliceValue(v []float64) any { var zero float64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} { } // StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) interface{} { +func StringSliceValue(v []string) any { var zero string cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} { } // AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v interface{}) []bool { +func AsBoolSlice(v any) []bool { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool { } // AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v interface{}) []int64 { +func AsInt64Slice(v any) []int64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 { } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. -func AsFloat64Slice(v interface{}) []float64 { +func AsFloat64Slice(v any) []float64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 { } // AsStringSlice converts a string array into a slice into with same elements as array. -func AsStringSlice(v interface{}) []string { +func AsStringSlice(v any) []string { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index f2ba89ce4..8df6249f0 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -25,8 +25,8 @@ type oneIterator struct { attr KeyValue } -// Next moves the iterator to the next position. Returns false if there are no -// more attributes. +// Next moves the iterator to the next position. +// Next reports whether there are more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() @@ -106,7 +106,8 @@ func (oi *oneIterator) advance() { } } -// Next returns true if there is another attribute available. +// Next moves the iterator to the next position. +// Next reports whether there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index d9a22c650..80a9e5643 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue { } } -// Defined returns true for non-empty keys. +// Defined reports whether the key is not empty. 
func (k Key) Defined() bool { return len(k) != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 3028f9a40..8c6928ca7 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -13,7 +13,7 @@ type KeyValue struct { Value Value } -// Valid returns if kv is a valid OpenTelemetry attribute. +// Valid reports whether kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { return kv.Key.Defined() && kv.Value.Type() != INVALID } diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 6cbefcead..64735d382 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -31,11 +31,11 @@ type ( // Distinct is a unique identifier of a Set. // - // Distinct is designed to be ensures equivalence stability: comparisons - // will return the save value across versions. For this reason, Distinct - // should always be used as a map key instead of a Set. + // Distinct is designed to ensure equivalence stability: comparisons will + // return the same value across versions. For this reason, Distinct should + // always be used as a map key instead of a Set. Distinct struct { - iface interface{} + iface any } // Sortable implements sort.Interface, used for sorting KeyValue. @@ -70,7 +70,7 @@ func (d Distinct) reflectValue() reflect.Value { return reflect.ValueOf(d.iface) } -// Valid returns true if this value refers to a valid Set. +// Valid reports whether this value refers to a valid Set. func (d Distinct) Valid() bool { return d.iface != nil } @@ -120,7 +120,7 @@ func (l *Set) Value(k Key) (Value, bool) { return Value{}, false } -// HasValue tests whether a key is defined in this set. +// HasValue reports whether a key is defined in this set. func (l *Set) HasValue(k Key) bool { if l == nil { return false @@ -155,7 +155,7 @@ func (l *Set) Equivalent() Distinct { return l.equivalent } -// Equals returns true if the argument set is equivalent to this set. +// Equals reports whether the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { return l.Equivalent() == o.Equivalent() } @@ -344,7 +344,7 @@ func computeDistinct(kvs []KeyValue) Distinct { // computeDistinctFixed computes a Distinct for small slices. It returns nil // if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { +func computeDistinctFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -373,7 +373,7 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { // computeDistinctReflect computes a Distinct using reflection, works for any // size input. -func computeDistinctReflect(kvs []KeyValue) interface{} { +func computeDistinctReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -387,7 +387,7 @@ func (l *Set) MarshalJSON() ([]byte, error) { } // MarshalLog is the marshaling function used by the logging system to represent this Set. 
-func (l Set) MarshalLog() interface{} { +func (l Set) MarshalLog() any { kvs := make(map[string]string) for _, kv := range l.ToSlice() { kvs[string(kv.Key)] = kv.Value.Emit() diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 817eecacf..653c33a86 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -22,7 +22,7 @@ type Value struct { vtype Type numeric uint64 stringly string - slice interface{} + slice any } const ( @@ -199,8 +199,8 @@ func (v Value) asStringSlice() []string { type unknownValueType struct{} -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { +// AsInterface returns Value's data as any. +func (v Value) AsInterface() any { switch v.Type() { case BOOL: return v.AsBool() @@ -262,7 +262,7 @@ func (v Value) Emit() string { func (v Value) MarshalJSON() ([]byte, error) { var jsonVal struct { Type string - Value interface{} + Value any } jsonVal.Type = v.Type().String() jsonVal.Value = v.AsInterface() diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 0e1fe2422..f83a448ec 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ // validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. // Baggage name is a valid, non-empty UTF-8 string. func validateBaggageName(s string) bool { - if len(s) == 0 { + if s == "" { return false } @@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool { // validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { - if len(s) == 0 { + if s == "" { return false } diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 49a35b122..d48847ed8 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return errors.New("nil receiver passed to UnmarshalJSON") } - var x interface{} + var x any if err := json.Unmarshal(b, &x); err != nil { return err } @@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) { if !ok { return nil, fmt.Errorf("invalid code: %d", *c) } - return []byte(fmt.Sprintf("%q", str)), nil + return fmt.Appendf(nil, "%q", str), nil } diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index 935bd4876..a311fbb48 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. 
-FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python -FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver +FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python +FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go index 3f0a518ae..30446bd28 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -94,7 +94,7 @@ func NewUnstarted(client Client) *Exporter { } // MarshalLog is the marshaling function used by the logging system to represent this Exporter. 
-func (e *Exporter) MarshalLog() interface{} { +func (e *Exporter) MarshalLog() any { return struct { Type string Client Client diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index ca4544f0d..d9bfd6e17 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -6,9 +6,10 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + commonpb "go.opentelemetry.io/proto/otlp/common/v1" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) // KeyValues transforms a slice of attribute KeyValues into OTLP key-values. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index 2e7690e43..43359c894 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -4,8 +4,9 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - "go.opentelemetry.io/otel/sdk/instrumentation" commonpb "go.opentelemetry.io/proto/otlp/common/v1" + + "go.opentelemetry.io/otel/sdk/instrumentation" ) func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go index db7b698a5..526bb5e07 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -4,8 +4,9 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - "go.opentelemetry.io/otel/sdk/resource" resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" + + "go.opentelemetry.io/otel/sdk/resource" ) // Resource transforms a Resource into an OTLP Resource. 
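The import regrouping running through these `tracetransform` hunks follows from the `goimports` `local-prefixes` change earlier in this diff (narrowed from `go.opentelemetry.io` to `go.opentelemetry.io/otel`): the `go.opentelemetry.io/proto/otlp/...` packages now sort with third-party imports instead of the local group. A sketch of the resulting grouping, using a hypothetical helper so the file compiles:

```go
package tracetransform

import (
	// Standard library imports come first.
	"fmt"

	// Third-party modules; go.opentelemetry.io/proto no longer matches
	// the local prefix go.opentelemetry.io/otel, so it sorts here.
	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"

	// Local go.opentelemetry.io/otel packages come last.
	"go.opentelemetry.io/otel/attribute"
)

// describeSpan is a hypothetical helper, present only so the example
// compiles; the point of the sketch is the import grouping above.
func describeSpan(s *tracepb.Span, kv attribute.KeyValue) string {
	return fmt.Sprintf("%s %s=%s", s.GetName(), kv.Key, kv.Value.Emit())
}
```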
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index bf27ef022..379bc8170 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -6,12 +6,13 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptr import ( "math" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // Spans transforms a slice of OpenTelemetry spans into a slice of OTLP @@ -154,7 +155,6 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { for _, otLink := range links { // This redefinition is necessary to prevent otLink.*ID[:] copies // being reused -- in short we need a new otLink per iteration. - otLink := otLink tid := otLink.SpanContext.TraceID() sid := otLink.SpanContext.SpanID() @@ -189,7 +189,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { events := make([]*tracepb.Span_Event, len(es)) // Transform message events - for i := 0; i < len(es); i++ { + for i := range es { events[i] = &tracepb.Span_Event{ Name: es[i].Name, TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 8236c995a..4b4cc76f4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -9,6 +9,8 @@ import ( "sync" "time" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -20,8 +22,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type client struct { @@ -289,7 +289,7 @@ func throttleDelay(s *status.Status) (bool, time.Duration) { } // MarshalLog is the marshaling function used by the logging system to represent this Client. -func (c *client) MarshalLog() interface{} { +func (c *client) MarshalLog() any { return struct { Type string Endpoint string diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index 583a8f867..c7b1a5514 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -18,6 +18,8 @@ import ( "sync" "time" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" @@ -25,14 +27,12 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) const contentTypeProto = "application/x-protobuf" var gzPool = sync.Pool{ - New: func() interface{} { + New: func() any { w := gzip.NewWriter(io.Discard) return w }, @@ -104,7 +104,7 @@ func NewClient(opts ...Option) otlptrace.Client { } // Start does nothing in a HTTP client. -func (d *client) Start(ctx context.Context) error { +func (*client) Start(ctx context.Context) error { // nothing to do select { case <-ctx.Done(): @@ -209,7 +209,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc return err } respStr := strings.TrimSpace(respData.String()) - if len(respStr) == 0 { + if respStr == "" { respStr = "(empty)" } bodyErr := fmt.Errorf("body: %s", respStr) @@ -230,7 +230,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc func (d *client) newRequest(body []byte) (request, error) { u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath} - r, err := http.NewRequest(http.MethodPost, u.String(), nil) + r, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody) if err != nil { return request{Request: r}, err } @@ -246,7 +246,7 @@ func (d *client) newRequest(body []byte) (request, error) { req := request{Request: r} switch Compression(d.cfg.Compression) { case NoCompression: - r.ContentLength = (int64)(len(body)) + r.ContentLength = int64(len(body)) req.bodyReader = bodyReader(body) case GzipCompression: // Ensure the content length is not used. @@ -274,7 +274,7 @@ func (d *client) newRequest(body []byte) (request, error) { } // MarshalLog is the marshaling function used by the logging system to represent this Client. 
-func (d *client) MarshalLog() interface{} { +func (d *client) MarshalLog() any { return struct { Type string Endpoint string @@ -340,7 +340,7 @@ func (e retryableError) Unwrap() error { return e.err } -func (e retryableError) As(target interface{}) bool { +func (e retryableError) As(target any) bool { if e.err == nil { return false } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index ed2ddce71..3b79c1a0b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index adbca7d34..86d7f4ba0 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -41,22 +41,22 @@ func GetLogger() logr.Logger { // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. -func Info(msg string, keysAndValues ...interface{}) { +func Info(msg string, keysAndValues ...any) { GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. -func Error(err error, msg string, keysAndValues ...interface{}) { +func Error(err error, msg string, keysAndValues ...any) { GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. -func Debug(msg string, keysAndValues ...interface{}) { +func Debug(msg string, keysAndValues ...any) { GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. -func Warn(msg string, keysAndValues ...interface{}) { +func Warn(msg string, keysAndValues ...any) { GetLogger().V(1).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 49e4ac4fa..bf5cf3119 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -26,6 +26,7 @@ import ( "sync/atomic" "go.opentelemetry.io/auto/sdk" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/metric/LICENSE +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index ebda5026d..051882602 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -20,7 +20,7 @@ type Baggage struct{} var _ TextMapPropagator = Baggage{} // Inject sets baggage key-values from ctx into the carrier. -func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { +func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { bStr := baggage.FromContext(ctx).String() if bStr != "" { carrier.Set(baggageHeader, bStr) @@ -30,7 +30,7 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { // Extract returns a copy of parent with the baggage from the carrier added. // If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked // for multiple values extraction. Otherwise, Get is called. -func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { +func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { if multiCarrier, ok := carrier.(ValuesGetter); ok { return extractMultiBaggage(parent, multiCarrier) } @@ -38,7 +38,7 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context } // Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { +func (Baggage) Fields() []string { return []string{baggageHeader} } diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index 5c8c26ea2..0a32c59aa 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -20,7 +20,7 @@ type TextMapCarrier interface { // must never be done outside of a new major release. // Set stores the key-value pair. 
- Set(key string, value string) + Set(key, value string) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -88,7 +88,7 @@ func (hc HeaderCarrier) Values(key string) []string { } // Set stores the key-value pair. -func (hc HeaderCarrier) Set(key string, value string) { +func (hc HeaderCarrier) Set(key, value string) { http.Header(hc).Set(key, value) } diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 6870e316d..6692d2665 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -36,7 +36,7 @@ var ( ) // Inject injects the trace context from ctx into carrier. -func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { +func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { return @@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont return trace.ContextWithRemoteSpanContext(ctx, sc) } -func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { +func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { h := carrier.Get(traceparentHeader) if h == "" { return trace.SpanContext{} @@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool { } // Fields returns the keys who's values are set with Inject. -func (tc TraceContext) Fields() []string { +func (TraceContext) Fields() []string { return []string{traceparentHeader, tracestateHeader} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go index 68d296cbe..1be472e91 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -19,7 +19,7 @@ import ( // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" // will also enable this). var Resource = newFeature("RESOURCE", func(v string) (string, bool) { - if strings.ToLower(v) == "true" { + if strings.EqualFold(v, "true") { return v, true } return "", false @@ -59,7 +59,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) { return f.parse(vRaw) } -// Enabled returns if the feature is enabled. +// Enabled reports whether the feature is enabled. func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index cefe4ab91..3f20eb7a5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) // Detect returns a *Resource that describes the string as a value // corresponding to attribute.Key as well as the specific schemaURL. -func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { +func (sd stringDetector) Detect(context.Context) (*Resource, error) { value, err := sd.F() if err != nil { return nil, fmt.Errorf("%s: %w", string(sd.K), err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index 0d8619715..bbe142d20 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type containerIDProvider func() (string, error) @@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup" // Detect returns a *Resource that describes the id of the container. // If no container id found, an empty resource will be returned. 
-func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) { containerID, err := containerID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 16a062ad8..4a1b017ee 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 781903923..5fed33d4f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type hostIDProvider func() (string, error) @@ -96,7 +96,7 @@ func (r *hostIDReaderLinux) read() (string, error) { type hostIDDetector struct{} // Detect returns a *Resource containing the platform specific host id. -func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (hostIDDetector) Detect(context.Context) (*Resource, error) { hostID, err := hostID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 01b4d27a0..51da76e80 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type osDescriptionProvider func() (string, error) @@ -32,7 +32,7 @@ type ( // Detect returns a *Resource that describes the operating system type the // service is running on. -func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { +func (osTypeDetector) Detect(context.Context) (*Resource, error) { osType := runtimeOS() osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) @@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the operating system the // service is running on. -func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (osDescriptionDetector) Detect(context.Context) (*Resource, error) { description, err := osDescription() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go index f537e5ca5..7252af79f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -63,12 +63,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string { return values } -// skip returns true if the line is blank or starts with a '#' character, and +// skip reports whether the line is blank or starts with a '#' character, and // therefore should be skipped from processing. 
func skip(line string) bool { line = strings.TrimSpace(line) - return len(line) == 0 || strings.HasPrefix(line, "#") + return line == "" || strings.HasPrefix(line, "#") } // parse attempts to split the provided line on the first '=' character, and then @@ -76,7 +76,7 @@ func skip(line string) bool { func parse(line string) (string, string, bool) { k, v, found := strings.Cut(line, "=") - if !found || len(k) == 0 { + if !found || k == "" { return "", "", false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 6712ce80d..138e57721 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -112,19 +112,19 @@ type ( // Detect returns a *Resource that describes the process identifier (PID) of the // executing process. -func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (processPIDDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil } // Detect returns a *Resource that describes the name of the process executable. -func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) { executableName := filepath.Base(commandArgs()[0]) return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil } // Detect returns a *Resource that describes the full path of the process executable. -func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) { executablePath, err := executablePath() if err != nil { return nil, err @@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err // Detect returns a *Resource that describes all the command arguments as received // by the process. -func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { +func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil } // Detect returns a *Resource that describes the username of the user that owns the // process. -func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { +func (processOwnerDetector) Detect(context.Context) (*Resource, error) { owner, err := owner() if err != nil { return nil, err @@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the name of the compiler used to compile // this process image. -func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil } // Detect returns a *Resource that describes the version of the runtime of this process. 
-func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil } // Detect returns a *Resource that describes the runtime of this process. -func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) { runtimeDescription := fmt.Sprintf( "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 09b91e1e1..28e1e4f7e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -112,7 +112,7 @@ func (r *Resource) String() string { } // MarshalLog is the marshaling function used by the logging system to represent this Resource. -func (r *Resource) MarshalLog() interface{} { +func (r *Resource) MarshalLog() any { return struct { Attributes attribute.Set SchemaURL string @@ -148,7 +148,7 @@ func (r *Resource) Iter() attribute.Iterator { return r.attrs.Iter() } -// Equal returns whether r and o represent the same resource. Two resources can +// Equal reports whether r and o represent the same resource. Two resources can // be equal even if they have different schema URLs. // // See the documentation on the [Resource] type for the pitfalls of using == diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 6966ed861..9bc3e525d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -6,24 +6,35 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "errors" + "fmt" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) // Defaults for BatchSpanProcessorOptions. const ( - DefaultMaxQueueSize = 2048 - DefaultScheduleDelay = 5000 + DefaultMaxQueueSize = 2048 + // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds. + DefaultScheduleDelay = 5000 + // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds. DefaultExportTimeout = 30000 DefaultMaxExportBatchSize = 512 ) +var queueFull = otelconv.ErrorTypeAttr("queue_full") + // BatchSpanProcessorOption configures a BatchSpanProcessor. 
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) @@ -67,6 +78,11 @@ type batchSpanProcessor struct { queue chan ReadOnlySpan dropped uint32 + selfObservabilityEnabled bool + callbackRegistration metric.Registration + spansProcessedCounter otelconv.SDKProcessorSpanProcessed + componentNameAttr attribute.KeyValue + batch []ReadOnlySpan batchMutex sync.Mutex timer *time.Timer @@ -87,11 +103,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) if maxExportBatchSize > maxQueueSize { - if DefaultMaxExportBatchSize > maxQueueSize { - maxExportBatchSize = maxQueueSize - } else { - maxExportBatchSize = DefaultMaxExportBatchSize - } + maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize) } o := BatchSpanProcessorOptions{ @@ -112,6 +124,21 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO stopCh: make(chan struct{}), } + if x.SelfObservability.Enabled() { + bsp.selfObservabilityEnabled = true + bsp.componentNameAttr = componentName() + + var err error + bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs( + bsp.componentNameAttr, + func() int64 { return int64(len(bsp.queue)) }, + int64(bsp.o.MaxQueueSize), + ) + if err != nil { + otel.Handle(err) + } + } + bsp.stopWait.Add(1) go func() { defer bsp.stopWait.Done() @@ -122,8 +149,61 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO return bsp } +var processorIDCounter atomic.Int64 + +// nextProcessorID returns an identifier for this batch span processor, +// starting with 0 and incrementing by 1 each time it is called. +func nextProcessorID() int64 { + return processorIDCounter.Add(1) - 1 +} + +func componentName() attribute.KeyValue { + id := nextProcessorID() + name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id) + return semconv.OTelComponentName(name) +} + +// newBSPObs creates and returns a new set of metrics instruments and a +// registration for a BatchSpanProcessor. It is the caller's responsibility +// to unregister the registration when it is no longer needed. +func newBSPObs( + cmpnt attribute.KeyValue, + qLen func() int64, + qMax int64, +) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) { + meter := otel.GetMeterProvider().Meter( + selfObsScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) + + qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) + err = errors.Join(err, e) + + spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter) + err = errors.Join(err, e) + + cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor + attrs := metric.WithAttributes(cmpnt, cmpntT) + + reg, e := meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(qSize.Inst(), qLen(), attrs) + o.ObserveInt64(qCap.Inst(), qMax, attrs) + return nil + }, + qSize.Inst(), + qCap.Inst(), + ) + err = errors.Join(err, e) + + return spansProcessed, reg, err +} + // OnStart method does nothing. -func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} +func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd method enqueues a ReadOnlySpan for later processing. 
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -162,6 +242,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() } + if bsp.selfObservabilityEnabled { + err = errors.Join(err, bsp.callbackRegistration.Unregister()) + } }) return err } @@ -171,7 +254,7 @@ type forceFlushSpan struct { flushed chan struct{} } -func (f forceFlushSpan) SpanContext() trace.SpanContext { +func (forceFlushSpan) SpanContext() trace.SpanContext { return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) } @@ -274,6 +357,11 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, int64(l), + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor)) + } err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. @@ -382,11 +470,17 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R case bsp.queue <- sd: return true case <-ctx.Done(): + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, 1, + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), + bsp.spansProcessedCounter.AttrErrorType(queueFull)) + } return false } } -func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } @@ -396,12 +490,18 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b return true default: atomic.AddUint32(&bsp.dropped, 1) + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, 1, + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), + bsp.spansProcessedCounter.AttrErrorType(queueFull)) + } } return false } // MarshalLog is the marshaling function used by the logging system to represent this Span Processor. -func (bsp *batchSpanProcessor) MarshalLog() interface{} { +func (bsp *batchSpanProcessor) MarshalLog() any { return struct { Type string SpanExporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index 1f60524e3..e58e7f6ed 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. + +See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about +the experimental features. 
*/ package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index c8d3fb7e3..3649322a6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -32,7 +32,7 @@ type randomIDGenerator struct{} var _ IDGenerator = &randomIDGenerator{} // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { +func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID { sid := trace.SpanID{} for { binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) @@ -45,7 +45,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace // NewIDs returns a non-zero trace ID and a non-zero span ID from a // randomly-chosen sequence. -func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { +func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) { tid := trace.TraceID{} sid := trace.SpanID{} for { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md new file mode 100644 index 000000000..feec16fa6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md @@ -0,0 +1,35 @@ +# Experimental Features + +The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Self-Observability](#self-observability) + +### Self-Observability + +The SDK provides a self-observability feature that allows you to monitor the SDK itself. + +To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. + +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.span.live` +- `otel.sdk.span.started` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go new file mode 100644 index 000000000..2fcbbcc66 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace]. +package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x" + +import ( + "os" + "strings" +) + +// SelfObservability is an experimental feature flag that determines if SDK +// self-observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 0e2a2e7c6..37ce2ac87 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,14 +5,20 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" @@ -20,6 +26,7 @@ import ( const ( defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" + selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace" ) // tracerProviderConfig. 
@@ -45,7 +52,7 @@ type tracerProviderConfig struct { } // MarshalLog is the marshaling function used by the logging system to represent this Provider. -func (cfg tracerProviderConfig) MarshalLog() interface{} { +func (cfg tracerProviderConfig) MarshalLog() any { return struct { SpanProcessors []SpanProcessor SamplerType string @@ -156,8 +163,18 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T t, ok := p.namedTracer[is] if !ok { t = &tracer{ - provider: p, - instrumentationScope: is, + provider: p, + instrumentationScope: is, + selfObservabilityEnabled: x.SelfObservability.Enabled(), + } + if t.selfObservabilityEnabled { + var err error + t.spanLiveMetric, t.spanStartedMetric, err = newInst() + if err != nil { + msg := "failed to create self-observability metrics for tracer: %w" + err := fmt.Errorf(msg, err) + otel.Handle(err) + } } p.namedTracer[is] = t } @@ -184,6 +201,23 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } +func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) { + m := otel.GetMeterProvider().Meter( + selfObsScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err error + spanLiveMetric, e := otelconv.NewSDKSpanLive(m) + err = errors.Join(err, e) + + spanStartedMetric, e := otelconv.NewSDKSpanStarted(m) + err = errors.Join(err, e) + + return spanLiveMetric, spanStartedMetric, err +} + // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { // This check prevents calls during a shutdown. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index aa7b262d0..689663d48 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler { type alwaysOnSampler struct{} -func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: RecordAndSample, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOnSampler) Description() string { +func (alwaysOnSampler) Description() string { return "AlwaysOnSampler" } @@ -131,14 +131,14 @@ func AlwaysSample() Sampler { type alwaysOffSampler struct{} -func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: Drop, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOffSampler) Description() string { +func (alwaysOffSampler) Description() string { return "AlwaysOffSampler" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 664e13e03..411d9ccdd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -39,7 +39,7 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { } // OnStart does nothing. 
-func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} +func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd immediately exports a ReadOnlySpan. func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -104,13 +104,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { } // ForceFlush does nothing as there is no data to flush. -func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { +func (*simpleSpanProcessor) ForceFlush(context.Context) error { return nil } // MarshalLog is the marshaling function used by the logging system to represent // this Span Processor. -func (ssp *simpleSpanProcessor) MarshalLog() interface{} { +func (ssp *simpleSpanProcessor) MarshalLog() any { return struct { Type string Exporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index d511d0f27..63aa33780 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -35,7 +35,7 @@ type snapshot struct { var _ ReadOnlySpan = snapshot{} -func (s snapshot) private() {} +func (snapshot) private() {} // Name returns the name of the span. func (s snapshot) Name() string { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 1785a4bbb..b376051fb 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -61,6 +61,7 @@ type ReadOnlySpan interface { InstrumentationScope() instrumentation.Scope // InstrumentationLibrary returns information about the instrumentation // library that created the span. + // // Deprecated: please use InstrumentationScope instead. InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. @@ -165,7 +166,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext { return s.spanContext } -// IsRecording returns if this span is being recorded. If this span has ended +// IsRecording reports whether this span is being recorded. If this span has ended // this will return false. func (s *recordingSpan) IsRecording() bool { if s == nil { @@ -177,7 +178,7 @@ func (s *recordingSpan) IsRecording() bool { return s.isRecording() } -// isRecording returns if this span is being recorded. If this span has ended +// isRecording reports whether this span is being recorded. If this span has ended // this will return false. // // This method assumes s.mu.Lock is held by the caller. @@ -495,6 +496,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() + if s.tracer.selfObservabilityEnabled { + defer func() { + // Add the span to the context to ensure the metric is recorded + // with the correct span context. 
+ ctx := trace.ContextWithSpan(context.Background(), s) + set := spanLiveSet(s.spanContext.IsSampled()) + s.tracer.spanLiveMetric.AddSet(ctx, -1, set) + }() + } + sps := s.tracer.provider.getSpanProcessors() if len(sps) == 0 { return @@ -545,7 +556,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { s.addEvent(semconv.ExceptionEventName, opts...) } -func typeStr(i interface{}) string { +func typeStr(i any) string { t := reflect.TypeOf(i) if t.PkgPath() == "" && t.Name() == "" { // Likely a builtin type. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 0b65ae9ab..e965c4cce 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -7,7 +7,9 @@ import ( "context" "time" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -17,6 +19,10 @@ type tracer struct { provider *TracerProvider instrumentationScope instrumentation.Scope + + selfObservabilityEnabled bool + spanLiveMetric otelconv.SDKSpanLive + spanStartedMetric otelconv.SDKSpanStarted } var _ trace.Tracer = &tracer{} @@ -46,17 +52,25 @@ func (tr *tracer) Start( } s := tr.newSpan(ctx, name, &config) + newCtx := trace.ContextWithSpan(ctx, s) + if tr.selfObservabilityEnabled { + psc := trace.SpanContextFromContext(ctx) + set := spanStartedSet(psc, s) + tr.spanStartedMetric.AddSet(newCtx, 1, set) + } + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { sps := tr.provider.getSpanProcessors() for _, sp := range sps { + // Use original context. sp.sp.OnStart(ctx, rw) } } if rtt, ok := s.(runtimeTracer); ok { - ctx = rtt.runtimeTrace(ctx) + newCtx = rtt.runtimeTrace(newCtx) } - return trace.ContextWithSpan(ctx, s), s + return newCtx, s } type runtimeTracer interface { @@ -112,11 +126,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo if !isRecording(samplingResult) { return tr.newNonRecordingSpan(sc) } - return tr.newRecordingSpan(psc, sc, name, samplingResult, config) + return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config) } // newRecordingSpan returns a new configured recordingSpan. func (tr *tracer) newRecordingSpan( + ctx context.Context, psc, sc trace.SpanContext, name string, sr SamplingResult, @@ -153,6 +168,14 @@ func (tr *tracer) newRecordingSpan( s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) + if tr.selfObservabilityEnabled { + // Propagate any existing values from the context with the new span to + // the measurement context. 
+ ctx = trace.ContextWithSpan(ctx, s) + set := spanLiveSet(s.spanContext.IsSampled()) + tr.spanLiveMetric.AddSet(ctx, 1, set) + } + return s } @@ -160,3 +183,112 @@ func (tr *tracer) newRecordingSpan( func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { return nonRecordingSpan{tracer: tr, sc: sc} } + +type parentState int + +const ( + parentStateNoParent parentState = iota + parentStateLocalParent + parentStateRemoteParent +) + +type samplingState int + +const ( + samplingStateDrop samplingState = iota + samplingStateRecordOnly + samplingStateRecordAndSample +) + +type spanStartedSetKey struct { + parent parentState + sampling samplingState +} + +var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{ + {parentStateNoParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + {parentStateLocalParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + + {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + {parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + + {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), + {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), + {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), +} + +func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set { + key := spanStartedSetKey{ + parent: parentStateNoParent, + sampling: samplingStateDrop, + } + + if psc.IsValid() { + if psc.IsRemote() { + key.parent = parentStateRemoteParent + } else { + key.parent = parentStateLocalParent + } + } + + if span.IsRecording() { + if span.SpanContext().IsSampled() { + key.sampling = samplingStateRecordAndSample + } else { + key.sampling = samplingStateRecordOnly + } + } + + return spanStartedSetCache[key] +} + +type spanLiveSetKey struct { + sampled bool +} 
+ +var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ + {true}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + {false}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), +} + +func spanLiveSet(sampled bool) attribute.Set { + key := spanLiveSetKey{sampled: sampled} + return spanLiveSetCache[key] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go index 07117495a..e12fa67e6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go @@ -25,10 +25,10 @@ func NewNoopExporter() *NoopExporter { type NoopExporter struct{} // ExportSpans handles export of spans by dropping them. -func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } +func (*NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } // Shutdown stops the exporter by doing nothing. -func (nsb *NoopExporter) Shutdown(context.Context) error { return nil } +func (*NoopExporter) Shutdown(context.Context) error { return nil } var _ trace.SpanExporter = (*InMemoryExporter)(nil) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go index 732669a17..ca63038f3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -47,14 +47,14 @@ func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) { // Shutdown does nothing. // // This method is safe to be called concurrently. -func (sr *SpanRecorder) Shutdown(context.Context) error { +func (*SpanRecorder) Shutdown(context.Context) error { return nil } // ForceFlush does nothing. // // This method is safe to be called concurrently. -func (sr *SpanRecorder) ForceFlush(context.Context) error { +func (*SpanRecorder) ForceFlush(context.Context) error { return nil } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go index cd2cc30ca..12b384b08 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -37,7 +37,7 @@ func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan { } ro := make([]tracesdk.ReadOnlySpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { ro[i] = s[i].Snapshot() } return ro diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go deleted file mode 100644 index b84dd2c5e..000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// version is the current release version of the metric SDK in use. 
-func version() string { - return "1.16.0-rc.1" -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index c0217af6b..7f97cc31e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go index e9eb57734..58b5eddef 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -104,7 +104,7 @@ func (sc *SemanticConventions) NetAttributesFromHTTPRequest( // It handles both IPv4 and IPv6 addresses. If the host portion is not recognized // as a valid IPv4 or IPv6 address, the `ip` result will be empty and the // host portion will instead be returned in `name`. -func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { +func hostIPNamePort(hostWithPort string) (ip, name string, port int) { var ( hostPart, portPart string parsedPort uint64 diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md deleted file mode 100644 index 82e1f46b4..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.20.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go deleted file mode 100644 index 6685c392b..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go +++ /dev/null @@ -1,1198 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// Describes HTTP attributes. -const ( - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. It represents the hTTP request method. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. It represents the [HTTP - // response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: ConditionallyRequired (If and only if one was - // received/sent.) - // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. It represents the hTTP request method. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. It represents the [HTTP response -// status code](https://tools.ietf.org/html/rfc7231#section-6). 
-func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTP Server spans attributes -const ( - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. It represents the URI scheme identifying the used - // protocol. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route (path template in - // the format used by the respective server framework). See note below - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if it's available) - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. - // SHOULD include the [application - // root](/specification/trace/semantic_conventions/http.md#http-server-definitions) - // if there is one. - HTTPRouteKey = attribute.Key("http.route") -) - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. It represents the URI scheme identifying the used -// protocol. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route (path template in the -// format used by the respective server framework). See note below -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the name identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'click', 'exception' - EventNameKey = attribute.Key("event.name") - - // EventDomainKey is the attribute Key conforming to the "event.domain" - // semantic conventions. It represents the domain identifies the business - // context for the events. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: Events across different domains may have same `event.name`, yet be - // unrelated events. - EventDomainKey = attribute.Key("event.domain") -) - -var ( - // Events from browser apps - EventDomainBrowser = EventDomainKey.String("browser") - // Events from mobile apps - EventDomainDevice = EventDomainKey.String("device") - // Events from Kubernetes - EventDomainK8S = EventDomainKey.String("k8s") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the name identifies the event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. It represents the transport protocol used. See - // note below. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - NetTransportKey = attribute.Key("net.transport") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. It represents the application - // layer protocol used. The value SHOULD be normalized to lowercase. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. It represents the version - // of the application layer protocol used. See note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `net.protocol.version` refers to the version of the protocol used - // and might be different from the protocol client's version. If the HTTP - // client used has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. It represents the remote - // socket peer name. - // - // Type: string - // RequirementLevel: Recommended (If available and different from - // `net.peer.name` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 'proxy.example.com' - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. It represents the remote - // socket peer address: IPv4 or IPv6 for internet protocols, path for local - // communication, - // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '127.0.0.1', '/tmp/mysql.sock' - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. It represents the remote - // socket peer port. - // - // Type: int - // RequirementLevel: Recommended (If defined for the address family and if - // different than `net.peer.port` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 16456 - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. It represents the protocol - // [address - // family](https://man7.org/linux/man-pages/man7/address_families.7.html) - // which is used for communication. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (If different than `inet` and if - // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers - // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in - // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support - // instrumentations that follow previous versions of this document.) - // Stability: stable - // Examples: 'inet6', 'bluetooth' - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. It represents the logical remote hostname, see - // note below. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com' - // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an - // extra DNS lookup. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. It represents the logical remote port number - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. It represents the logical local hostname or - // similar, see note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. It represents the logical local port number, - // preferably the one that the peer used to connect - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 8080 - NetHostPortKey = attribute.Key("net.host.port") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. It represents the local - // socket address. Useful in case of a multi-IP host. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '192.168.0.1' - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. It represents the local - // socket port number. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If defined for the address - // family and if different than `net.host.port` and if `net.sock.host.addr` - // is set. In other cases, it is still recommended to set this.) - // Stability: stable - // Examples: 35555 - NetSockHostPortKey = attribute.Key("net.sock.host.port") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe. See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -var ( - // IPv4 address - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. It represents the application -// layer protocol used. The value SHOULD be normalized to lowercase. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. It represents the version of -// the application layer protocol used. See note below. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. It represents the remote socket -// peer name. 
-func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. It represents the remote socket -// peer address: IPv4 or IPv6 for internet protocols, path for local -// communication, -// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. It represents the remote socket -// peer port. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. It represents the logical remote -// hostname, see note below. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. It represents the logical remote port -// number -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. It represents the logical local -// hostname or similar, see note below. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. It represents the logical local port -// number, preferably the one that the peer used to connect -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. It represents the local socket -// address. Useful in case of a multi-IP host. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. It represents the local socket -// port number. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetHostConnectionTypeKey is the attribute Key conforming to the - // "net.host.connection.type" semantic conventions. It represents the - // internet connection type currently being used by the host. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'wifi' - NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") - - // NetHostConnectionSubtypeKey is the attribute Key conforming to the - // "net.host.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'LTE' - NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") - - // NetHostCarrierNameKey is the attribute Key conforming to the - // "net.host.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'sprint' - NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") - - // NetHostCarrierMccKey is the attribute Key conforming to the - // "net.host.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '310' - NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") - - // NetHostCarrierMncKey is the attribute Key conforming to the - // "net.host.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '001' - NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") - - // NetHostCarrierIccKey is the attribute Key conforming to the - // "net.host.carrier.icc" semantic conventions. It represents the ISO - // 3166-1 alpha-2 2-character country code associated with the mobile - // carrier network. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'DE' - NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") -) - -var ( - // wifi - NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") - // wired - NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") - // cell - NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") - // unavailable - NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") - // unknown - NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") -) - -var ( - // GPRS - NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") - // EDGE - NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") - // UMTS - NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") - // CDMA - NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") - // HSPA - NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") - // IDEN - NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") - // LTE - NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") - // EHRPD - NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") - // GSM - NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") -) - -// NetHostCarrierName returns an attribute KeyValue conforming to the -// "net.host.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetHostCarrierName(val string) attribute.KeyValue { - return NetHostCarrierNameKey.String(val) -} - -// NetHostCarrierMcc returns an attribute KeyValue conforming to the -// "net.host.carrier.mcc" semantic conventions. It represents the mobile -// carrier country code. -func NetHostCarrierMcc(val string) attribute.KeyValue { - return NetHostCarrierMccKey.String(val) -} - -// NetHostCarrierMnc returns an attribute KeyValue conforming to the -// "net.host.carrier.mnc" semantic conventions. It represents the mobile -// carrier network code. -func NetHostCarrierMnc(val string) attribute.KeyValue { - return NetHostCarrierMncKey.String(val) -} - -// NetHostCarrierIcc returns an attribute KeyValue conforming to the -// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetHostCarrierIcc(val string) attribute.KeyValue { - return NetHostCarrierIccKey.String(val) -} - -// Semantic conventions for HTTP client and server Spans. -const ( - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. It represents the - // size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. It represents the - // size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") -) - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. It represents the size -// of the request payload body in bytes. This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. It represents the size -// of the response payload body in bytes. This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// Semantic convention describing per-message attributes populated on messaging -// spans or links. -const ( - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the [conversation ID](#conversations) identifying the conversation to - // which the message belongs, represented as a string. Sometimes called - // "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to - // the "messaging.message.payload_size_bytes" semantic conventions. It - // represents the (uncompressed) size of the message payload in bytes. Also - // use this attribute if it is unknown whether the compressed or - // uncompressed payload size is reported. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") - - // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key - // conforming to the "messaging.message.payload_compressed_size_bytes" - // semantic conventions. It represents the compressed size of the message - // payload in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") -) - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. 
It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the [conversation ID](#conversations) identifying the -// conversation to which the message belongs, represented as a string. -// Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming -// to the "messaging.message.payload_size_bytes" semantic conventions. It -// represents the (uncompressed) size of the message payload in bytes. Also use -// this attribute if it is unknown whether the compressed or uncompressed -// payload size is reported. -func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadSizeBytesKey.Int(val) -} - -// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue -// conforming to the "messaging.message.payload_compressed_size_bytes" semantic -// conventions. It represents the compressed size of the message payload in -// bytes. -func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) -} - -// Semantic convention for attributes that describe messaging destination on -// broker -const ( - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker does not have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") -) - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// Semantic convention for attributes that describe messaging source on broker -const ( - // MessagingSourceNameKey is the attribute Key conforming to the - // "messaging.source.name" semantic conventions. It represents the message - // source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Source name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker does not have such notion, the source name SHOULD uniquely - // identify the broker. - MessagingSourceNameKey = attribute.Key("messaging.source.name") - - // MessagingSourceTemplateKey is the attribute Key conforming to the - // "messaging.source.template" semantic conventions. It represents the low - // cardinality representation of the messaging source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Source names could be constructed from templates. An example would - // be a source name involving a user name or product id. Although the - // source name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and - // aggregation. 
- MessagingSourceTemplateKey = attribute.Key("messaging.source.template") - - // MessagingSourceTemporaryKey is the attribute Key conforming to the - // "messaging.source.temporary" semantic conventions. It represents a - // boolean that is true if the message source is temporary and might not - // exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") - - // MessagingSourceAnonymousKey is the attribute Key conforming to the - // "messaging.source.anonymous" semantic conventions. It represents a - // boolean that is true if the message source is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") -) - -// MessagingSourceName returns an attribute KeyValue conforming to the -// "messaging.source.name" semantic conventions. It represents the message -// source name -func MessagingSourceName(val string) attribute.KeyValue { - return MessagingSourceNameKey.String(val) -} - -// MessagingSourceTemplate returns an attribute KeyValue conforming to the -// "messaging.source.template" semantic conventions. It represents the low -// cardinality representation of the messaging source name -func MessagingSourceTemplate(val string) attribute.KeyValue { - return MessagingSourceTemplateKey.String(val) -} - -// MessagingSourceTemporary returns an attribute KeyValue conforming to the -// "messaging.source.temporary" semantic conventions. It represents a boolean -// that is true if the message source is temporary and might not exist anymore -// after messages are processed. -func MessagingSourceTemporary(val bool) attribute.KeyValue { - return MessagingSourceTemporaryKey.Bool(val) -} - -// MessagingSourceAnonymous returns an attribute KeyValue conforming to the -// "messaging.source.anonymous" semantic conventions. It represents a boolean -// that is true if the message source is anonymous (could be unnamed or have -// auto-generated name). -func MessagingSourceAnonymous(val bool) attribute.KeyValue { - return MessagingSourceAnonymousKey.Bool(val) -} - -// Attributes for RabbitMQ -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If not empty.) - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// Attributes for Apache Kafka -const ( - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. 
If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaClientIDKey is the attribute Key conforming to the - // "messaging.kafka.client_id" semantic conventions. It represents the - // client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the - // "messaging.kafka.source.partition" semantic conventions. It represents - // the partition the message is received from. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If value is `true`. When - // missing, the value is assumed to be `false`.) - // Stability: stable - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. 
-func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaClientID returns an attribute KeyValue conforming to the -// "messaging.kafka.client_id" semantic conventions. It represents the client -// ID for the Consumer or Producer that is handling the message. -func MessagingKafkaClientID(val string) attribute.KeyValue { - return MessagingKafkaClientIDKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to -// the "messaging.kafka.source.partition" semantic conventions. It represents -// the partition the message is received from. -func MessagingKafkaSourcePartition(val int) attribute.KeyValue { - return MessagingKafkaSourcePartitionKey.Int(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// Attributes for Apache RocketMQ -const ( - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqClientIDKey is the attribute Key conforming to the - // "messaging.rocketmq.client_id" semantic conventions. It represents the - // unique identifier for each client. 
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myhost@8742@s8083jm' - MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delay time level is not specified.) - // Stability: stable - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delivery timestamp is not specified.) - // Stability: stable - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) - // Stability: stable - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqClientID returns an attribute KeyValue conforming to the -// "messaging.rocketmq.client_id" semantic conventions. It represents the -// unique identifier for each client. -func MessagingRocketmqClientID(val string) attribute.KeyValue { - return MessagingRocketmqClientIDKey.String(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. 
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go deleted file mode 100644 index 637763932..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. 
- // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. -const ( - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessageTypeKey = attribute.Key("message.type") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. 
-func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} - -// The attributes used to report a single exception associated with a span. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example above](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go deleted file mode 100644 index 9c1840631..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -// HTTP scheme attributes. -var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go deleted file mode 100644 index 3d44dae27..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go +++ /dev/null @@ -1,2060 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// The web browser in which the application represented by the resource is -// running. 
The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. 
It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. 
- // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. - CloudResourceIDKey = attribute.Key("cloud.resource_id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// 
"cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) -// on Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. 
-const ( - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). 
-func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// Heroku dyno metadata -const ( - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") -) - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// A container instance. -const ( - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. 
It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageTagKey is the attribute Key conforming to the - // "container.image.tag" semantic conventions. It represents the container - // image tag. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageTag returns an attribute KeyValue conforming to the -// "container.image.tag" semantic conventions. It represents the container -// image tag. -func ContainerImageTag(val string) attribute.KeyValue { - return ContainerImageTagKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment -// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka -// deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// The device on which the process represented by this resource is running. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of - // the device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// A serverless instance. -const ( - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. 
It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run:** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). 
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// A host is defined as a general computing instance. -const ( - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - HostArchKey = attribute.Key("host.arch") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID. For Cloud, this - // value is from the provider. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID. For -// Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image as defined in [Version -// Attributes](README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// A Kubernetes Cluster. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. 
It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// A Kubernetes Node object. -const ( - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// A Kubernetes Namespace. -const ( - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// A Kubernetes Pod object. -const ( - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// A container in a -// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") -) - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// A Kubernetes ReplicaSet object. -const ( - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") -) - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// A Kubernetes Deployment object. -const ( - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// A Kubernetes StatefulSet object. -const ( - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") -) - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// A Kubernetes DaemonSet object. -const ( - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") -) - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// A Kubernetes Job object. -const ( - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// A Kubernetes CronJob object. -const ( - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - OSTypeKey = attribute.Key("os.type") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](../../resource/semantic_conventions/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) 
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// The single (language) runtime instance which is monitored.
-const (
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
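A sketch of filling the `process.*` and `process.runtime.*` attributes from the running binary; `processResource` is a hypothetical helper, and error handling is elided for brevity:

```go
package example

import (
	"os"
	"runtime"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// processResource describes the current process; for a Go binary the
// runtime name is simply "go" and runtime.Version() is used verbatim.
func processResource() *resource.Resource {
	exe, _ := os.Executable() // error handling elided
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessExecutablePath(exe),
		semconv.ProcessRuntimeName("go"),
		semconv.ProcessRuntimeVersion(runtime.Version()),
	)
}
```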
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// A service instance. -const ( - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
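A sketch of the service-identity attributes above, assuming `github.com/google/uuid` (already a dependency of this module) to satisfy the uniqueness note on `service.instance.id`; `serviceResource` and the literal values are illustrative:

```go
package example

import (
	"github.com/google/uuid"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// serviceResource builds the service identity; a random Version 4 UUID
// stands in when no stable instance ID exists.
func serviceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceInstanceID(uuid.NewString()),
		semconv.ServiceVersion("2.0.0"),
	)
}
```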
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryAutoVersionKey is the attribute Key conforming to the - // "telemetry.auto.version" semantic conventions. It represents the version - // string of the auto instrumentation agent, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -// TelemetryAutoVersion returns an attribute KeyValue conforming to the -// "telemetry.auto.version" semantic conventions. It represents the version -// string of the auto instrumentation agent, if used. -func TelemetryAutoVersion(val string) attribute.KeyValue { - return TelemetryAutoVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). 
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry
-// Scope's concepts.
-const (
- // OTelLibraryNameKey is the attribute Key conforming to the
- // "otel.library.name" semantic conventions. It is deprecated;
- // use the `otel.scope.name` attribute instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelLibraryNameKey = attribute.Key("otel.library.name")
-
- // OTelLibraryVersionKey is the attribute Key conforming to the
- // "otel.library.version" semantic conventions. It is
- // deprecated; use the `otel.scope.version` attribute instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '1.0.0'
- OTelLibraryVersionKey = attribute.Key("otel.library.version")
-)
-
-// OTelLibraryName returns an attribute KeyValue conforming to the
-// "otel.library.name" semantic conventions. It is deprecated; use
-// the `otel.scope.name` attribute instead.
-func OTelLibraryName(val string) attribute.KeyValue {
- return OTelLibraryNameKey.String(val)
-}
-
-// OTelLibraryVersion returns an attribute KeyValue conforming to the
-// "otel.library.version" semantic conventions. It is deprecated;
-// use the `otel.scope.version` attribute instead.
-func OTelLibraryVersion(val string) attribute.KeyValue {
- return OTelLibraryVersionKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
deleted file mode 100644
index 90b1b0452..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
+++ /dev/null
@@ -1,2599 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-)
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
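A sketch of recording these exception attributes on a span event, using the error's dynamic type as the comment above recommends; `recordException` is a hypothetical helper, and the `exception` event name follows the trace semantic conventions:

```go
package example

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// recordException attaches exception.type and exception.message to a
// span event; fmt.Sprintf("%T", err) yields the dynamic error type.
func recordException(span trace.Span, err error) {
	span.AddEvent("exception", trace.WithAttributes(
		semconv.ExceptionType(fmt.Sprintf("%T", err)),
		semconv.ExceptionMessage(err.Error()),
	))
}
```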
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// The attributes described in this section are rather generic. They may be
-// used in any Log Record they apply to.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means that two
- // distinguishable log records MUST have different values.
- // The id MAY be a [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Span attributes used by AWS Lambda (in addition to general `faas`
-// attributes).
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` response, where applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` response, where applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for CloudEvents. CloudEvents is a specification on how to define
-// event data in a standard way. These attributes can be attached to spans when
-// performing operations with CloudEvents, regardless of the protocol being
-// used.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
- // which uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
- // which identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
- // which contains a value describing the type of event related to the
- // originating occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
-// which uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
-// which identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
-// which contains a value describing the type of event related to the
-// originating occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// Semantic conventions for the OpenTracing Shim
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span does not depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The attributes used to perform database client calls.
-const (
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents an identifier for the database management
- // system (DBMS) product being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- DBSystemKey = attribute.Key("db.system")
-
- // DBConnectionStringKey is the attribute Key conforming to the
- // "db.connection_string" semantic conventions. It represents the
- // connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
-
- // DBUserKey is the attribute Key conforming to the "db.user" semantic
- // conventions. It represents the username for accessing the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
-
- // DBJDBCDriverClassnameKey is the attribute Key conforming to the
- // "db.jdbc.driver_classname" semantic conventions. It represents the
- // fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
- // driver used to connect.
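A sketch of attaching these call-level database attributes to a client span; `annotateDBSpan` is a hypothetical helper, and the values are this block's own examples (the statement is assumed to be already sanitized):

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateDBSpan records the db.* attributes for a single SQL call.
func annotateDBSpan(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemPostgreSQL,
		semconv.DBUser("readonly_user"),
		semconv.DBName("customers"),
		semconv.DBStatement("SELECT * FROM wuser_table"),
		semconv.DBOperation("SELECT"),
	)
}
```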
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
-
- // DBNameKey is the attribute Key conforming to the "db.name" semantic
- // conventions. It represents the name
- // of the database being accessed. For commands that switch the database,
- // this should be set to the target database (even if the command fails).
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If applicable.)
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called
- // "schema name". In case there are multiple layers that could be
- // considered for database name (e.g. Oracle instance name and schema
- // name), the database name to be used is the more specific layer (e.g.
- // Oracle schema name).
- DBNameKey = attribute.Key("db.name")
-
- // DBStatementKey is the attribute Key conforming to the "db.statement"
- // semantic conventions. It represents the database statement being
- // executed.
- //
- // Type: string
- // RequirementLevel: Recommended (Should be collected by default only if
- // there is sanitization that excludes sensitive information.)
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- DBStatementKey = attribute.Key("db.statement")
-
- // DBOperationKey is the attribute Key conforming to the "db.operation"
- // semantic conventions. It represents the name of the operation being
- // executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If `db.statement` is not
- // applicable.)
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to
- // attempt any client-side parsing of `db.statement` just to get this
- // property, but it should be set if the operation name is provided by the
- // library being instrumented. If the SQL statement has an ambiguous
- // operation, or performs more than one operation, this value may be
- // omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only.
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBConnectionString returns an 
attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
- return DBConnectionStringKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
- return DBUserKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
- return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the name of
-// the database being accessed. For commands that switch the database, this
-// should be set to the target database (even if the command fails).
-func DBName(val string) attribute.KeyValue {
- return DBNameKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
- return DBStatementKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
- return DBOperationKey.String(val)
-}
-
-// Connection-level attributes for Microsoft SQL Server
-const (
- // DBMSSQLInstanceNameKey is the attribute Key conforming to the
- // "db.mssql.instance_name" semantic conventions. It represents the
- // Microsoft SQL Server [instance
- // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // being connected to. This name is used to determine the port of a named
- // instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
- // longer required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// being connected to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
- return DBMSSQLInstanceNameKey.String(val)
-}
-
-// Call-level attributes for Cassandra
-const (
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraTableKey is the attribute Key conforming to the
- // "db.cassandra.table" semantic conventions. It represents the name of the
- // primary table that the operation is acting upon, including the keyspace
- // name (if applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra
- // rather than sql. It is not recommended to attempt any client-side
- // parsing of `db.statement` just to get this property, but it should be
- // set if it is provided by the library being instrumented. If the
- // operation is acting upon an anonymous table, or more than one table,
- // this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
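A sketch of starting a Cassandra client span that carries these attributes; the tracer wiring, span name, and values are illustrative:

```go
package example

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// startCassandraSpan opens a client span annotated with the
// db.cassandra.* attributes from this block.
func startCassandraSpan(ctx context.Context, tracer trace.Tracer) (context.Context, trace.Span) {
	return tracer.Start(ctx, "SELECT mykeyspace.mytable",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemCassandra,
			semconv.DBCassandraPageSize(5000),
			semconv.DBCassandraConsistencyLevelLocalQuorum,
			semconv.DBCassandraCoordinatorDC("us-west-2"),
		),
	)
}
```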
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary table that the operation is acting upon, including the keyspace name
-// (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
- return DBCassandraTableKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// Call-level attributes for Redis
-const (
- // DBRedisDBIndexKey is the attribute Key conforming to the
- // "db.redis.database_index" semantic conventions.
It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If other than the default - // database (`0`).) - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// Call-level attributes for MongoDB -const ( - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the collection -// being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// Call-level attributes for SQL databases -const ( - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// Call-level attributes for Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. 
- // - // Type: Enum - // RequirementLevel: ConditionallyRequired (when performing one of the - // operations in this list) - // Stability: stable - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as - // default)) - // Stability: stable - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if available) - // Stability: stable - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: ConditionallyRequired (if response was received) - // Stability: stable - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: ConditionallyRequired (when response was received and - // contained sub-code.) - // Stability: stable - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. 
It represents the RU
- // consumed for that operation
- //
- // Type: double
- // RequirementLevel: ConditionallyRequired (when available)
- // Stability: stable
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBContainer returns an attribute KeyValue conforming to the
-// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
-// container name.
-func DBCosmosDBContainer(val string) attribute.KeyValue {
- return DBCosmosDBContainerKey.String(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions.
It represents the RU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
-// This semantic convention describes an instance of a function that runs
-// without provisioning or managing of servers (also known as serverless
-// functions or Function as a Service (FaaS)) with spans.
-const (
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: For the server/consumer span on the incoming side,
- // `faas.trigger` MUST be set.
- //
- // Clients invoking FaaS instances usually cannot set `faas.trigger`,
- // since they would typically need to look in the payload to determine
- // the event type. If clients set it, it should be the same as the
- // trigger that the corresponding incoming span would have (i.e., this has
- // nothing to do with the underlying transport used to make the API
- // call to invoke the lambda, which is often HTTP).
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 this corresponds to the bucket name, and
- // in Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the type
- // of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 this is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions.
It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to
-// the database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// this is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-)
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
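// Usage sketch (illustrative only, not part of the vendored file, assuming a
// trace.Span named `span` is in scope): these FaaS helpers are plain
// attribute.KeyValue constructors, so a timer-triggered, cold-started
// invocation might be recorded as:
//
//	span.SetAttributes(
//		FaaSTriggerTimer,
//		FaaSTime("2020-01-23T13:47:06Z"),
//		FaaSCron("0/5 * * * ? *"),
//		FaaSColdstart(true),
//	)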
-func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// Contains additional attributes for outgoing FaaS spans. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](../../resource/semantic_conventions/README.md#service) - // of the remote service. SHOULD be equal to the actual `service.name` - // resource attribute of the remote service if any. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](../../resource/semantic_conventions/README.md#service) of -// the remote service. SHOULD be equal to the actual `service.name` resource -// attribute of the remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
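// Usage sketch (illustrative only, assuming a trace.Span named `span` is in
// scope and that claims have already been extracted from the access token):
//
//	span.SetAttributes(
//		EnduserID("username"),
//		EnduserRole("admin"),
//		EnduserScope("read:message write:files"),
//	)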
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// These attributes allow reporting on this unit of code and therefore provide
-// more context about the span.
-const (
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") -) - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// Semantic Convention for HTTP Client -const ( - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. It represents the full HTTP request URL in the form - // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is - // not transmitted over HTTP, but if it is known, it should be included - // nevertheless. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the - // attribute's value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - - // HTTPResendCountKey is the attribute Key conforming to the - // "http.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Recommended (if and only if request was retried.) - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPResendCountKey = attribute.Key("http.resend_count") -) - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. 
It represents the full HTTP request URL in the form
-// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
-// transmitted over HTTP, but if it is known, it should be included
-// nevertheless.
-func HTTPURL(val string) attribute.KeyValue {
- return HTTPURLKey.String(val)
-}
-
-// HTTPResendCount returns an attribute KeyValue conforming to the
-// "http.resend_count" semantic conventions. It represents the ordinal number
-// of request resending attempt (for any reason, including redirects).
-func HTTPResendCount(val int) attribute.KeyValue {
- return HTTPResendCountKey.Int(val)
-}
-
-// Semantic Convention for HTTP Server
-const (
- // HTTPTargetKey is the attribute Key conforming to the "http.target"
- // semantic conventions. It represents the full request target as passed in
- // an HTTP request line or equivalent.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '/users/12314/?q=ddds'
- HTTPTargetKey = attribute.Key("http.target")
-
- // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
- // semantic conventions. It represents the IP address of the original
- // client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.sock.peer.addr`, which
- // would identify the network-level peer, which may be a proxy.
- //
- // This attribute should be set when a source of information different
- // from the one used for `net.sock.peer.addr` is available, even if that
- // other source just confirms the same value as `net.sock.peer.addr`.
- // Rationale: For `net.sock.peer.addr`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions. It represents the full request target as passed in an
-// HTTP request line or equivalent.
-func HTTPTarget(val string) attribute.KeyValue {
- return HTTPTargetKey.String(val)
-}
-
-// HTTPClientIP returns an attribute KeyValue conforming to the
-// "http.client_ip" semantic conventions. It represents the IP address of the
-// original client behind all proxies, if known (e.g. from
-// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
-func HTTPClientIP(val string) attribute.KeyValue {
- return HTTPClientIPKey.String(val)
-}
-
-// The `aws` conventions apply to operations using the AWS SDK. They map
-// request or response parameters in AWS SDK API calls to attributes on a Span.
-// The conventions have been collected over time based on feedback from AWS
-// users of tracing and will continue to evolve as new interesting conventions
-// are found.
-// Some descriptions are also provided for populating general OpenTelemetry
-// semantic conventions based on these APIs.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
- // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. 
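// Usage sketch (illustrative only, assuming a trace.Span named `span` is in
// scope): request-level DynamoDB parameters map one-to-one onto these
// helpers, e.g. for a read with a projection:
//
//	span.SetAttributes(
//		AWSDynamoDBTableNames("Users"),
//		AWSDynamoDBConsistentRead(true),
//		AWSDynamoDBProjection("Title, Price, Color"),
//		AWSDynamoDBLimit(10),
//	)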
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field.
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the
-// number of items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. 
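// Usage sketch (illustrative only, assuming a trace.Span named `span` is in
// scope): a parallel Scan segment and its response counts might be recorded
// as:
//
//	span.SetAttributes(
//		AWSDynamoDBSegment(10),
//		AWSDynamoDBTotalSegments(100),
//		AWSDynamoDBCount(10),
//		AWSDynamoDBScannedCount(50),
//	)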
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// DynamoDB.UpdateTable
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e.
that require the object key as a mandatory parameter. - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. 
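// Usage sketch (illustrative only, assuming a trace.Span named `span` is in
// scope): an upload-part call in a multipart upload might be annotated as:
//
//	span.SetAttributes(
//		AWSS3Bucket("some-bucket-name"),
//		AWSS3Key("someFile.yml"),
//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
//		AWSS3PartNumber(3456),
//	)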
-func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") - - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// General attributes used in messaging systems. -const ( - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents a string - // identifying the messaging system. 
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation as defined in the [Operation - // names](#operation-names) section above. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the span describes an - // operation on a batch of messages.) - // Stability: stable - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") -) - -var ( - // publish - MessagingOperationPublish = MessagingOperationKey.String("publish") - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// MessagingSystem returns an attribute KeyValue conforming to the -// "messaging.system" semantic conventions. It represents a string identifying -// the messaging system. -func MessagingSystem(val string) attribute.KeyValue { - return MessagingSystemKey.String(val) -} - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// Semantic convention for a consumer of messages received from a messaging -// system -const ( - // MessagingConsumerIDKey is the attribute Key conforming to the - // "messaging.consumer.id" semantic conventions. It represents the - // identifier for the consumer receiving a message. For Kafka, set it to - // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if - // both are present, or only `messaging.kafka.consumer.group`. For brokers, - // such as RabbitMQ and Artemis, set it to the `client_id` of the client - // consuming the message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mygroup - client-6' - MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") -) - -// MessagingConsumerID returns an attribute KeyValue conforming to the -// "messaging.consumer.id" semantic conventions. It represents the identifier -// for the consumer receiving a message. 
For Kafka, set it to -// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both -// are present, or only `messaging.kafka.consumer.group`. For brokers, such as -// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the -// message. -func MessagingConsumerID(val string) attribute.KeyValue { - return MessagingConsumerIDKey.String(val) -} - -// Semantic conventions for remote procedure calls. -const ( - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCSystemKey = attribute.Key("rpc.system") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// Tech-specific attributes for gRPC. -const ( - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. 
It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). -const ( - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // does not specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If other than the default - // version (`1.0`)) - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If response is not successful.) - // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// does not specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// Tech-specific attributes for Connect RPC. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (If response is not successful - // and if error code available.) 
- // Stability: stable
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-)
-
-var (
-	// cancelled
-	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
-	// unknown
-	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
-	// invalid_argument
-	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
-	// deadline_exceeded
-	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
-	// not_found
-	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
-	// already_exists
-	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
-	// permission_denied
-	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
-	// resource_exhausted
-	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
-	// failed_precondition
-	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
-	// aborted
-	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
-	// out_of_range
-	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
-	// unimplemented
-	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
-	// internal
-	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
-	// unavailable
-	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
-	// data_loss
-	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
-	// unauthenticated
-	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
index 5b5666257..98c0fddaa 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
@@ -3467,6 +3467,13 @@ func ContainerImageTags(val ...string) attribute.KeyValue {
 	return ContainerImageTagsKey.StringSlice(val)
 }
 
+// ContainerLabel returns an attribute KeyValue conforming to the
+// "container.label" semantic conventions. It represents the container labels,
+// `<key>` being the label name, the value being the label value.
+func ContainerLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("container.label."+key, val)
+}
+
 // ContainerName returns an attribute KeyValue conforming to the "container.name"
 // semantic conventions. It represents the container name used by container
 // runtime.
@@ -3794,6 +3801,22 @@ func DBOperationName(val string) attribute.KeyValue {
 	return DBOperationNameKey.String(val)
 }
 
+// DBOperationParameter returns an attribute KeyValue conforming to the
+// "db.operation.parameter" semantic conventions. It represents a database
+// operation parameter, with `<key>` being the parameter name, and the attribute
+// value being a string representation of the parameter value.
+func DBOperationParameter(key string, val string) attribute.KeyValue {
+	return attribute.String("db.operation.parameter."+key, val)
+}
+
+// DBQueryParameter returns an attribute KeyValue conforming to the
+// "db.query.parameter" semantic conventions. It represents a database query
+// parameter, with `<key>` being the parameter name, and the attribute value
+// being a string representation of the parameter value.
+func DBQueryParameter(key string, val string) attribute.KeyValue {
+	return attribute.String("db.query.parameter."+key, val)
+}
+
 // DBQuerySummary returns an attribute KeyValue conforming to the
 // "db.query.summary" semantic conventions. It represents the low cardinality
 // summary of a database query.
@@ -7312,6 +7335,14 @@ func HTTPRequestBodySize(val int) attribute.KeyValue {
 	return HTTPRequestBodySizeKey.Int(val)
 }
 
+// HTTPRequestHeader returns an attribute KeyValue conforming to the
+// "http.request.header" semantic conventions. It represents the HTTP request
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("http.request.header."+key, val)
+}
+
 // HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
 // "http.request.method_original" semantic conventions. It represents the
 // original HTTP method sent by the client in the request line.
@@ -7347,6 +7378,14 @@ func HTTPResponseBodySize(val int) attribute.KeyValue {
 	return HTTPResponseBodySizeKey.Int(val)
 }
 
+// HTTPResponseHeader returns an attribute KeyValue conforming to the
+// "http.response.header" semantic conventions. It represents the HTTP response
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPResponseHeader(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("http.response.header."+key, val)
+}
+
 // HTTPResponseSize returns an attribute KeyValue conforming to the
 // "http.response.size" semantic conventions. It represents the total size of the
 // response in bytes. This should be the total number of bytes sent over the
@@ -8001,6 +8040,22 @@ func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
 	return K8SContainerStatusLastTerminatedReasonKey.String(val)
 }
 
+// K8SCronJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob
+// annotation placed on the CronJob, the `<key>` being the annotation name, the
+// value being the annotation value.
+func K8SCronJobAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.cronjob.annotation."+key, val)
+}
+
+// K8SCronJobLabel returns an attribute KeyValue conforming to the
+// "k8s.cronjob.label" semantic conventions. It represents the label placed on
+// the CronJob, the `<key>` being the label name, the value being the label
+// value.
+func K8SCronJobLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.cronjob.label."+key, val)
+}
+
 // K8SCronJobName returns an attribute KeyValue conforming to the
 // "k8s.cronjob.name" semantic conventions. It represents the name of the
 // CronJob.
@@ -8014,6 +8069,20 @@ func K8SCronJobUID(val string) attribute.KeyValue {
 	return K8SCronJobUIDKey.String(val)
 }
 
+// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.daemonset.annotation" semantic conventions. It represents the annotation
+// key-value pairs placed on the DaemonSet.
+func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.annotation."+key, val) +} + +// K8SDaemonSetLabel returns an attribute KeyValue conforming to the +// "k8s.daemonset.label" semantic conventions. It represents the label key-value +// pairs placed on the DaemonSet. +func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.label."+key, val) +} + // K8SDaemonSetName returns an attribute KeyValue conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. @@ -8028,6 +8097,20 @@ func K8SDaemonSetUID(val string) attribute.KeyValue { return K8SDaemonSetUIDKey.String(val) } +// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the +// "k8s.deployment.annotation" semantic conventions. It represents the annotation +// key-value pairs placed on the Deployment. +func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.annotation."+key, val) +} + +// K8SDeploymentLabel returns an attribute KeyValue conforming to the +// "k8s.deployment.label" semantic conventions. It represents the label key-value +// pairs placed on the Deployment. +func K8SDeploymentLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.label."+key, val) +} + // K8SDeploymentName returns an attribute KeyValue conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of the // Deployment. @@ -8054,6 +8137,20 @@ func K8SHPAUID(val string) attribute.KeyValue { return K8SHPAUIDKey.String(val) } +// K8SJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.job.annotation" semantic conventions. It represents the annotation +// key-value pairs placed on the Job. +func K8SJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.annotation."+key, val) +} + +// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" +// semantic conventions. It represents the label key-value pairs placed on the +// Job. +func K8SJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.label."+key, val) +} + // K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. func K8SJobName(val string) attribute.KeyValue { @@ -8066,6 +8163,20 @@ func K8SJobUID(val string) attribute.KeyValue { return K8SJobUIDKey.String(val) } +// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the +// "k8s.namespace.annotation" semantic conventions. It represents the annotation +// key-value pairs placed on the Namespace. +func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.annotation."+key, val) +} + +// K8SNamespaceLabel returns an attribute KeyValue conforming to the +// "k8s.namespace.label" semantic conventions. It represents the label key-value +// pairs placed on the Namespace. +func K8SNamespaceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.label."+key, val) +} + // K8SNamespaceName returns an attribute KeyValue conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. 
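A minimal usage sketch of the templated label/annotation helpers added in these hunks (assuming only the v1.34.0 semconv package from this change; names and values are illustrative): each call emits one attribute per label or annotation key, with the caller-supplied key appended to the attribute-name prefix.

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/attribute"
		semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
	)

	func main() {
		// Illustrative values; one KeyValue is produced per templated key.
		attrs := []attribute.KeyValue{
			semconv.K8SNamespaceName("payments"),            // k8s.namespace.name
			semconv.K8SNamespaceLabel("team", "platform"),   // k8s.namespace.label.team
			semconv.K8SDeploymentAnnotation("owner", "sre"), // k8s.deployment.annotation.owner
		}
		for _, kv := range attrs {
			fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
		}
	}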
@@ -8073,6 +8184,22 @@ func K8SNamespaceName(val string) attribute.KeyValue {
 	return K8SNamespaceNameKey.String(val)
 }
 
+// K8SNodeAnnotation returns an attribute KeyValue conforming to the
+// "k8s.node.annotation" semantic conventions. It represents the annotation
+// placed on the Node, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SNodeAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.node.annotation."+key, val)
+}
+
+// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label"
+// semantic conventions. It represents the label placed on the Node, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SNodeLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.node.label."+key, val)
+}
+
 // K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
 // semantic conventions. It represents the name of the Node.
 func K8SNodeName(val string) attribute.KeyValue {
@@ -8085,6 +8212,21 @@ func K8SNodeUID(val string) attribute.KeyValue {
 	return K8SNodeUIDKey.String(val)
 }
 
+// K8SPodAnnotation returns an attribute KeyValue conforming to the
+// "k8s.pod.annotation" semantic conventions. It represents the annotation placed
+// on the Pod, the `<key>` being the annotation name, the value being the
+// annotation value.
+func K8SPodAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.pod.annotation."+key, val)
+}
+
+// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label"
+// semantic conventions. It represents the label placed on the Pod, the `<key>`
+// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.pod.label."+key, val)
+}
+
 // K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
 // semantic conventions. It represents the name of the Pod.
 func K8SPodName(val string) attribute.KeyValue {
@@ -8097,6 +8239,20 @@ func K8SPodUID(val string) attribute.KeyValue {
 	return K8SPodUIDKey.String(val)
 }
 
+// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.replicaset.annotation" semantic conventions. It represents the annotation
+// key-value pairs placed on the ReplicaSet.
+func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.replicaset.annotation."+key, val)
+}
+
+// K8SReplicaSetLabel returns an attribute KeyValue conforming to the
+// "k8s.replicaset.label" semantic conventions. It represents the label key-value
+// pairs placed on the ReplicaSet.
+func K8SReplicaSetLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.replicaset.label."+key, val)
+}
+
 // K8SReplicaSetName returns an attribute KeyValue conforming to the
 // "k8s.replicaset.name" semantic conventions. It represents the name of the
 // ReplicaSet.
@@ -8139,6 +8295,20 @@ func K8SResourceQuotaUID(val string) attribute.KeyValue {
 	return K8SResourceQuotaUIDKey.String(val)
 }
 
+// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.statefulset.annotation" semantic conventions. It represents the
+// annotation key-value pairs placed on the StatefulSet.
+func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.statefulset.annotation."+key, val)
+}
+
+// K8SStatefulSetLabel returns an attribute KeyValue conforming to the
+// "k8s.statefulset.label" semantic conventions. It represents the label
+// key-value pairs placed on the StatefulSet.
+func K8SStatefulSetLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.statefulset.label."+key, val)
+}
+
 // K8SStatefulSetName returns an attribute KeyValue conforming to the
 // "k8s.statefulset.name" semantic conventions. It represents the name of the
 // StatefulSet.
@@ -10497,6 +10667,14 @@ func ProcessCreationTime(val string) attribute.KeyValue {
 	return ProcessCreationTimeKey.String(val)
 }
 
+// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the
+// "process.environment_variable" semantic conventions. It represents the process
+// environment variables, `<key>` being the environment variable name, the value
+// being the environment variable value.
+func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue {
+	return attribute.String("process.environment_variable."+key, val)
+}
+
 // ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
 // "process.executable.build_id.gnu" semantic conventions. It represents the GNU
 // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
@@ -10965,6 +11143,38 @@ const (
 	RPCSystemKey = attribute.Key("rpc.system")
 )
 
+// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the
+// connect request metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val)
+}
+
+// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the
+// connect response metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val)
+}
+
+// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC
+// request metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.grpc.request.metadata."+key, val)
+}
+
+// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC
+// response metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.grpc.response.metadata."+key, val)
+}
+
 // RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
 // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
 // property of response if it is an error response.
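A minimal usage sketch for the metadata helpers above (assuming only the v1.34.0 semconv package from this change; the key and value are illustrative): each helper expects a key already normalized to lowercase and accepts one or more values, producing a string-slice attribute.

	package main

	import (
		"fmt"

		semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
	)

	func main() {
		// The caller-supplied key is appended to the "rpc.grpc.request.metadata." prefix.
		kv := semconv.RPCGRPCRequestMetadata("x-request-id", "abc123")
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit()) // prints: rpc.grpc.request.metadata.x-request-id=[abc123]
	}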
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/error_type.go new file mode 100644 index 000000000..19bf02246 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/error_type.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return ErrorTypeOther + } + return ErrorTypeKey.String(value) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md new file mode 100644 index 000000000..248054789 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md @@ -0,0 +1,41 @@ + +# Migration from v1.36.0 to v1.37.0 + +The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions. + +## Removed + +The following declarations have been removed. +Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. + +If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. +If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. 
+
+- `ContainerRuntime`
+- `ContainerRuntimeKey`
+- `GenAIOpenAIRequestServiceTierAuto`
+- `GenAIOpenAIRequestServiceTierDefault`
+- `GenAIOpenAIRequestServiceTierKey`
+- `GenAIOpenAIResponseServiceTier`
+- `GenAIOpenAIResponseServiceTierKey`
+- `GenAIOpenAIResponseSystemFingerprint`
+- `GenAIOpenAIResponseSystemFingerprintKey`
+- `GenAISystemAWSBedrock`
+- `GenAISystemAnthropic`
+- `GenAISystemAzureAIInference`
+- `GenAISystemAzureAIOpenAI`
+- `GenAISystemCohere`
+- `GenAISystemDeepseek`
+- `GenAISystemGCPGemini`
+- `GenAISystemGCPGenAI`
+- `GenAISystemGCPVertexAI`
+- `GenAISystemGroq`
+- `GenAISystemIBMWatsonxAI`
+- `GenAISystemKey`
+- `GenAISystemMistralAI`
+- `GenAISystemOpenAI`
+- `GenAISystemPerplexity`
+- `GenAISystemXai`
+
+[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions
+[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
new file mode 100644
index 000000000..d795247f3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.37.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.37.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
new file mode 100644
index 000000000..b6b27498f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
@@ -0,0 +1,15193 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Namespace: android
+const (
+	// AndroidAppStateKey is the attribute Key conforming to the "android.app.state"
+	// semantic conventions. It represents the state
+	// of the application.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "created"
+	// Note: The Android lifecycle states are defined in
+	// [Activity lifecycle callbacks], from which the `OS identifiers` are
+	// derived.
+	//
+	// [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc
+	AndroidAppStateKey = attribute.Key("android.app.state")
+
+	// AndroidOSAPILevelKey is the attribute Key conforming to the
+	// "android.os.api_level" semantic conventions. It uniquely
+	// identifies the framework API revision offered by a version (`os.version`) of
+	// the Android operating system. More information can be found in the
+	// [Android API levels documentation].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "33", "32"
+	//
+	// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the Android operating system. More information can be found in the
+// [Android API levels documentation].
+//
+// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+	return AndroidOSAPILevelKey.String(val)
+}
+
+// Enum values for android.app.state
+var (
+	// Any time before Activity.onResume() or, if the app has no Activity,
+	// Context.startService() has been called in the app for the first time.
+	//
+	// Stability: development
+	AndroidAppStateCreated = AndroidAppStateKey.String("created")
+	// Any time after Activity.onPause() or, if the app has no Activity,
+	// Context.stopService() has been called when the app was in the foreground
+	// state.
+	//
+	// Stability: development
+	AndroidAppStateBackground = AndroidAppStateKey.String("background")
+	// Any time after Activity.onResume() or, if the app has no Activity,
+	// Context.startService() has been called when the app was in either the created
+	// or background states.
+	//
+	// Stability: development
+	AndroidAppStateForeground = AndroidAppStateKey.String("foreground")
+)
+
+// Namespace: app
+const (
+	// AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic
+	// conventions. It represents the unique identifier for a particular build or
+	// compilation of the application.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0",
+	// "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123"
+	AppBuildIDKey = attribute.Key("app.build_id")
+
+	// AppInstallationIDKey is the attribute Key conforming to the
+	// "app.installation.id" semantic conventions. It represents a unique identifier
+	// representing the installation of an application on a specific device.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092"
+	// Note: Its value SHOULD persist across launches of the same application
+	// installation, including through application upgrades.
+	// It SHOULD change if the application is uninstalled or if all applications of
+	// the vendor are uninstalled.
+	// Additionally, users might be able to reset this value (e.g. by clearing
+	// application data).
+	// If an app is installed multiple times on the same device (e.g. in different
+	// accounts on Android), each `app.installation.id` SHOULD have a different
+	// value.
+	// If multiple OpenTelemetry SDKs are used within the same application, they
+	// SHOULD use the same value for `app.installation.id`.
+	// Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the
+	// `app.installation.id`.
+	//
+	// For iOS, this value SHOULD be equal to the [vendor identifier].
+	//
+	// For Android, examples of `app.installation.id` implementations include:
+	//
+	//   - [Firebase Installation ID].
+	//   - A globally unique UUID which is persisted across sessions in your
+	//     application.
+	//   - [App set ID].
+	//   - [`Settings.getString(Settings.Secure.ANDROID_ID)`].
+	//
+	// More information about Android identifier best practices can be found in the
+	// [Android user data IDs guide].
+	//
+	// [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor
+	// [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations
+	// [App set ID]: https://developer.android.com/identity/app-set-id
+	// [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID
+	// [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
+	AppInstallationIDKey = attribute.Key("app.installation.id")
+
+	// AppJankFrameCountKey is the attribute Key conforming to the
+	// "app.jank.frame_count" semantic conventions. It represents a number of frame
+	// renders that experienced jank.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 9, 42
+	// Note: Depending on platform limitations, the value provided MAY be an
+	// approximation.
+	AppJankFrameCountKey = attribute.Key("app.jank.frame_count")
+
+	// AppJankPeriodKey is the attribute Key conforming to the "app.jank.period"
+	// semantic conventions. It represents the time period, in seconds, for which
+	// this jank is being reported.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0, 5.0, 10.24
+	AppJankPeriodKey = attribute.Key("app.jank.period")
+
+	// AppJankThresholdKey is the attribute Key conforming to the
+	// "app.jank.threshold" semantic conventions. It represents the minimum
+	// rendering threshold for this jank, in seconds.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.016, 0.7, 1.024
+	AppJankThresholdKey = attribute.Key("app.jank.threshold")
+
+	// AppScreenCoordinateXKey is the attribute Key conforming to the
+	// "app.screen.coordinate.x" semantic conventions. It represents the x
+	// (horizontal) coordinate of a screen coordinate, in screen pixels.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 131
+	AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x")
+
+	// AppScreenCoordinateYKey is the attribute Key conforming to the
+	// "app.screen.coordinate.y" semantic conventions. It represents the y
+	// (vertical) component of a screen coordinate, in screen pixels.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 12, 99
+	AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y")
+
+	// AppWidgetIDKey is the attribute Key conforming to the "app.widget.id"
+	// semantic conventions. It represents an identifier that uniquely
+	// differentiates this widget from other widgets in the same application.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829"
+	// Note: A widget is an application component, typically an on-screen visual GUI
+	// element.
+	AppWidgetIDKey = attribute.Key("app.widget.id")
+
+	// AppWidgetNameKey is the attribute Key conforming to the "app.widget.name"
+	// semantic conventions. It represents the name of an application widget.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "submit", "attack", "Clear Cart"
+	// Note: A widget is an application component, typically an on-screen visual GUI
+	// element.
+ AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppBuildID returns an attribute KeyValue conforming to the "app.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the application. +func AppBuildID(val string) attribute.KeyValue { + return AppBuildIDKey.String(val) +} + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppJankFrameCount returns an attribute KeyValue conforming to the +// "app.jank.frame_count" semantic conventions. It represents a number of frame +// renders that experienced jank. +func AppJankFrameCount(val int) attribute.KeyValue { + return AppJankFrameCountKey.Int(val) +} + +// AppJankPeriod returns an attribute KeyValue conforming to the +// "app.jank.period" semantic conventions. It represents the time period, in +// seconds, for which this jank is being reported. +func AppJankPeriod(val float64) attribute.KeyValue { + return AppJankPeriodKey.Float64(val) +} + +// AppJankThreshold returns an attribute KeyValue conforming to the +// "app.jank.threshold" semantic conventions. It represents the minimum rendering +// threshold for this jank, in seconds. +func AppJankThreshold(val float64) attribute.KeyValue { + return AppJankThresholdKey.Float64(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. +func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. 
The implementer can then provide the other
+	// hash values through an additional set of attribute extensions as they
+	// deem necessary.
+	//
+	// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+	ArtifactHashKey = attribute.Key("artifact.hash")
+
+	// ArtifactPurlKey is the attribute Key conforming to the "artifact.purl"
+	// semantic conventions. It represents the [Package URL] of the
+	// [package artifact], which provides a standard way to identify and locate the
+	// packaged artifact.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "pkg:github/package-url/purl-spec@1209109710924",
+	// "pkg:npm/foo@12.12.3"
+	//
+	// [Package URL]: https://github.com/package-url/purl-spec
+	// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+	ArtifactPurlKey = attribute.Key("artifact.purl")
+
+	// ArtifactVersionKey is the attribute Key conforming to the "artifact.version"
+	// semantic conventions. It represents the version of the artifact.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "v0.1.0", "1.2.1", "122691-build"
+	ArtifactVersionKey = attribute.Key("artifact.version")
+)
+
+// ArtifactAttestationFilename returns an attribute KeyValue conforming to the
+// "artifact.attestation.filename" semantic conventions. It represents the
+// provenance filename of the built attestation which directly relates to the
+// build artifact filename. This filename SHOULD accompany the artifact at
+// publish time. See the [SLSA Relationship] specification for more information.
+//
+// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+func ArtifactAttestationFilename(val string) attribute.KeyValue {
+	return ArtifactAttestationFilenameKey.String(val)
+}
+
+// ArtifactAttestationHash returns an attribute KeyValue conforming to the
+// "artifact.attestation.hash" semantic conventions. It represents the full
+// [hash value (see glossary)], of the built attestation. Some envelopes in the
+// [software attestation space] also refer to this as the **digest**.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+func ArtifactAttestationHash(val string) attribute.KeyValue {
+	return ArtifactAttestationHashKey.String(val)
+}
+
+// ArtifactAttestationID returns an attribute KeyValue conforming to the
+// "artifact.attestation.id" semantic conventions. It represents the id of the
+// build [software attestation].
+//
+// [software attestation]: https://slsa.dev/attestation-model
+func ArtifactAttestationID(val string) attribute.KeyValue {
+	return ArtifactAttestationIDKey.String(val)
+}
+
+// ArtifactFilename returns an attribute KeyValue conforming to the
+// "artifact.filename" semantic conventions. It represents the human readable
+// file name of the artifact, typically generated during build and release
+// processes. Often includes the package name and version in the file name.
+func ArtifactFilename(val string) attribute.KeyValue {
+	return ArtifactFilenameKey.String(val)
+}
+
+// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash"
+// semantic conventions. It represents the full [hash value (see glossary)],
+// often found in checksum.txt on a release of the artifact and used to verify
+// package integrity.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+func ArtifactHash(val string) attribute.KeyValue {
+	return ArtifactHashKey.String(val)
+}
+
+// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl"
+// semantic conventions. It represents the [Package URL] of the
+// [package artifact], which provides a standard way to identify and locate the
+// packaged artifact.
+//
+// [Package URL]: https://github.com/package-url/purl-spec
+// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+func ArtifactPurl(val string) attribute.KeyValue {
+	return ArtifactPurlKey.String(val)
+}
+
+// ArtifactVersion returns an attribute KeyValue conforming to the
+// "artifact.version" semantic conventions. It represents the version of the
+// artifact.
+func ArtifactVersion(val string) attribute.KeyValue {
+	return ArtifactVersionKey.String(val)
+}
+
+// Namespace: aws
+const (
+	// AWSBedrockGuardrailIDKey is the attribute Key conforming to the
+	// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+	// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+	// prevent unwanted behavior from model responses or user messages.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "sgi5gkybzqak"
+	//
+	// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+	AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id")
+
+	// AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the
+	// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the
+	// unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a
+	// bank of information that can be queried by models to generate more relevant
+	// responses and augment prompts.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "XFWUPB9PAW"
+	//
+	// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+	AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id")
+
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the
+	// "aws.dynamodb.attribute_definitions" semantic conventions. It represents the
+	// JSON-serialized value of each item in the `AttributeDefinitions` request
+	// field.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "{ "AttributeName": "string", "AttributeType": "string" }"
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+	// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+	// value of the `AttributesToGet` request parameter.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "lives", "id"
+	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+	// AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+	// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+	// of the `ConsistentRead` request parameter.
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. 
It represents the value of + // the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of + // the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" + AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") + + // AWSKinesisStreamNameKey is the attribute Key conforming to the + // "aws.kinesis.stream_name" semantic conventions. It represents the name of the + // AWS Kinesis [stream] the request refers to. Corresponds to the + // `--stream-name` parameter of the Kinesis [describe-stream] operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-stream-name" + // + // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html + // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html + AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") + + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked + // ARN as provided on the `Context` passed to the function (the + // `Lambda-Runtime-Invoked-Function-Arn` header on the + // `/runtime/invocation/next` response, where applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" + // Note: This may be different from `cloud.resource_id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") + + // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the + // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID + // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a Lambda + // function. Its contents are read by Lambda and used to trigger a function. + // This isn't available in the Lambda execution context or the Lambda runtime + // environment. It is populated by the AWS SDK for each language + // when that UUID is present. Some of these operations are + // Create/Delete/Get/List/Update EventSourceMapping. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7" + // + // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html + AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource + // Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" + // Note: See the [log group ARN format documentation]. + // + // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of the + // AWS log group(s) an application is writing to.
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/aws/lambda/my-function", "opentelemetry-service" + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each + // writes to its own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the + // AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + // Note: See the [log stream ARN format documentation]. One log group can + // contain several log streams, so these ARNs necessarily identify both a log + // group and a log stream. + // + // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) of the + // AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in the + // response headers `x-amzn-requestid`, `x-amzn-request-id` or + // `x-amz-request-id`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" + AWSRequestIDKey = attribute.Key("aws.request_id") + + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request refers to. + // Corresponds to the `--bucket` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-bucket-name" + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e., those that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source object + // (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 API].
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e., those that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+ // This applies in particular to the following operations: + // + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the + // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN + // of the Secret stored in the Secrets Manager. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters" + AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn") + + // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn" + // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon + // SNS [topic] is a logical access point that acts as a communication channel. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" + // + // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html + AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") + + // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" + // semantic conventions. It represents the URL of the AWS SQS Queue. It's a + // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is + // used to access the queue and perform actions on it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" + AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") + + // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the + // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN + // of the AWS Step Functions Activity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" + AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") + + // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the + // "aws.step_functions.state_machine.arn" semantic conventions. It represents + // the ARN of the AWS Step Functions State Machine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" + AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") +) + +// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the +// "aws.bedrock.guardrail.id" semantic conventions.
It represents the unique +// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
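+//
+// A minimal usage sketch, assuming a span obtained from the OpenTelemetry
+// trace API (the JSON value shown is illustrative, not a complete
+// `GlobalSecondaryIndexUpdates` payload):
+//
+//	span.SetAttributes(AWSDynamoDBGlobalSecondaryIndexUpdates(
+//		`{ "Create": { "IndexName": "name_to_group" } }`,
+//	))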
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. 
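+//
+// A minimal usage sketch, assuming a span obtained from the OpenTelemetry
+// trace API:
+//
+//	span.SetAttributes(AWSDynamoDBScannedCount(50))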
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. +// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. 
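+//
+// A minimal usage sketch, assuming the go.opentelemetry.io/otel/sdk/resource
+// package; ECS task attributes typically describe the emitting resource rather
+// than a single span, and the task ID shown is illustrative:
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		AWSECSTaskID("10838bed-421f-43ef-870a-f43feacbbb5b"))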
+func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// AWSExtendedRequestID returns an attribute KeyValue conforming to the +// "aws.extended_request_id" semantic conventions. It represents the AWS extended +// request ID as returned in the response header `x-amz-id-2`. +func AWSExtendedRequestID(val string) attribute.KeyValue { + return AWSExtendedRequestIDKey.String(val) +} + +// AWSKinesisStreamName returns an attribute KeyValue conforming to the +// "aws.kinesis.stream_name" semantic conventions. It represents the name of the +// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name` +// parameter of the Kinesis [describe-stream] operation. +// +// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html +// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html +func AWSKinesisStreamName(val string) attribute.KeyValue { + return AWSKinesisStreamNameKey.String(val) +} + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked +// ARN as provided on the `Context` passed to the function (the +// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` +// response, where applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the +// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID +// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a Lambda +// function. Its contents are read by Lambda and used to trigger a function. +// This isn't available in the Lambda execution context or the Lambda runtime +// environment. It is populated by the AWS SDK for each language +// when that UUID is present. Some of these operations are +// Create/Delete/Get/List/Update EventSourceMapping. +// +// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html +func AWSLambdaResourceMappingID(val string) attribute.KeyValue { + return AWSLambdaResourceMappingIDKey.String(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to.
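+//
+// A minimal usage sketch, assuming the go.opentelemetry.io/otel/sdk/resource
+// package (log group names are typically resource-level attributes):
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"))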
+func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of the +// AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" +// semantic conventions. It represents the AWS request ID as returned in the +// response headers `x-amzn-requestid`, `x-amzn-request-id`, or +// `x-amz-request-id`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" +// semantic conventions. It represents the S3 bucket name the request refers to. +// Corresponds to the `--bucket` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object (in +// the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete" +// semantic conventions. It represents the delete request container that +// specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic +// conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the +// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of +// the Secret stored in the Secrets Manager. +func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue { + return AWSSecretsmanagerSecretARNKey.String(val) +} + +// AWSSNSTopicARN returns an attribute KeyValue conforming to the +// "aws.sns.topic.arn" semantic conventions.
It represents the ARN of the AWS SNS +// Topic. An Amazon SNS [topic] is a logical access point that acts as a +// communication channel. +// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // Amazon EC2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // Amazon Fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during the operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions, i.e., a cross-regional + // call.
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API]. + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") + + // AzureResourceProviderNamespaceKey is the attribute Key conforming to the + // "azure.resource_provider.namespace" semantic conventions. It represents the + // [Azure Resource Provider Namespace] as recognized by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") + + // AzureServiceRequestIDKey is the attribute Key conforming to the + // "azure.service.request.id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during the operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions, i.e., a
+// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. +func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the +// "azure.resource_provider.namespace" semantic conventions. It represents the +// [Azure Resource Provider Namespace] as recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzureResourceProviderNamespace(val string) attribute.KeyValue { + return AzureResourceProviderNamespaceKey.String(val) +} + +// AzureServiceRequestID returns an attribute KeyValue conforming to the +// "azure.service.request.id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. +func AzureServiceRequestID(val string) attribute.KeyValue { + return AzureServiceRequestIDKey.String(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. + // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // Strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // Bounded Staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // Session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // Eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // Consistent Prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. 
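+//
+// A minimal usage sketch, assuming a span obtained from the OpenTelemetry
+// trace API:
+//
+//	span.SetAttributes(BrowserMobile(true))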
+func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running. +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Namespace: cassandra +const ( + // CassandraConsistencyLevelKey is the attribute Key conforming to the + // "cassandra.consistency.level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from [CQL]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html + CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level") + + // CassandraCoordinatorDCKey is the attribute Key conforming to the + // "cassandra.coordinator.dc" semantic conventions. It represents the data + // center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: us-west-2 + CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc") + + // CassandraCoordinatorIDKey is the attribute Key conforming to the + // "cassandra.coordinator.id" semantic conventions. It represents the ID of the + // coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af + CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id") + + // CassandraPageSizeKey is the attribute Key conforming to the + // "cassandra.page.size" semantic conventions. It represents the fetch size used + // for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 5000 + CassandraPageSizeKey = attribute.Key("cassandra.page.size") + + // CassandraQueryIdempotentKey is the attribute Key conforming to the + // "cassandra.query.idempotent" semantic conventions. It represents whether + // or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent") + + // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the + // "cassandra.speculative_execution.count" semantic conventions. It represents + // the number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 2 + CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count") +) + +// CassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "cassandra.coordinator.dc" semantic conventions. It represents the data center +// of the coordinating node for a query. +func CassandraCoordinatorDC(val string) attribute.KeyValue { + return CassandraCoordinatorDCKey.String(val) +} + +// CassandraCoordinatorID returns an attribute KeyValue conforming to the +// "cassandra.coordinator.id" semantic conventions. It represents the ID of the +// coordinating node for a query.
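+// +// For example (editor's sketch, using the example values above and assuming an active span): +// +//	span.SetAttributes( +//		semconv.CassandraCoordinatorDC("us-west-2"), +//		semconv.CassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"), +//	)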
+func CassandraCoordinatorID(val string) attribute.KeyValue { + return CassandraCoordinatorIDKey.String(val) +} + +// CassandraPageSize returns an attribute KeyValue conforming to the +// "cassandra.page.size" semantic conventions. It represents the fetch size used +// for paging, i.e. how many rows will be returned at once. +func CassandraPageSize(val int) attribute.KeyValue { + return CassandraPageSizeKey.Int(val) +} + +// CassandraQueryIdempotent returns an attribute KeyValue conforming to the +// "cassandra.query.idempotent" semantic conventions. It represents whether +// or not the query is idempotent. +func CassandraQueryIdempotent(val bool) attribute.KeyValue { + return CassandraQueryIdempotentKey.Bool(val) +} + +// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to +// the "cassandra.speculative_execution.count" semantic conventions. It +// represents the number of times a query was speculatively executed. Not set or +// `0` if the query was not executed speculatively. +func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return CassandraSpeculativeExecutionCountKey.Int(val) +} + +// Enum values for cassandra.consistency.level +var ( + // All + // Stability: development + CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") + // Each Quorum + // Stability: development + CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") + // Quorum + // Stability: development + CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") + // Local Quorum + // Stability: development + CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") + // One + // Stability: development + CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") + // Two + // Stability: development + CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") + // Three + // Stability: development + CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") + // Local One + // Stability: development + CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") + // Any + // Stability: development + CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") + // Serial + // Stability: development + CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") + // Local Serial + // Stability: development + CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") +) + +// Namespace: cicd +const ( + // CICDPipelineActionNameKey is the attribute Key conforming to the + // "cicd.pipeline.action.name" semantic conventions. It represents the kind of + // action a pipeline run is performing. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BUILD", "RUN", "SYNC" + CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name") + + // CICDPipelineNameKey is the attribute Key conforming to the + // "cicd.pipeline.name" semantic conventions. It represents the human readable + // name of the pipeline within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Build and Test", "Lint", "Deploy Go Project", + // "deploy_to_environment" + CICDPipelineNameKey = attribute.Key("cicd.pipeline.name") + + // CICDPipelineResultKey is the attribute Key conforming to the + // "cicd.pipeline.result" semantic conventions.
It represents the result of a + // pipeline run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineResultKey = attribute.Key("cicd.pipeline.result") + + // CICDPipelineRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.run.id" semantic conventions. It represents the unique + // identifier of a pipeline run within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "120912" + CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id") + + // CICDPipelineRunStateKey is the attribute Key conforming to the + // "cicd.pipeline.run.state" semantic conventions. It represents the state a + // pipeline run goes through during its lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pending", "executing", "finalizing" + CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state") + + // CICDPipelineRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of + // the pipeline run, providing the complete address in order to locate and + // identify the pipeline run. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full") + + // CICDPipelineTaskNameKey is the attribute Key conforming to the + // "cicd.pipeline.task.name" semantic conventions. It represents the human + // readable name of a task within a pipeline. Task here most closely aligns with + // a [computing process] in a pipeline. Other terms for tasks include commands, + // steps, and procedures. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" + // + // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) + CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") + + // CICDPipelineTaskRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique + // identifier of a task run within a pipeline. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "12097" + CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") + + // CICDPipelineTaskRunResultKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.result" semantic conventions. It represents the + // result of a task run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result") + + // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the + // [URL] of the pipeline task run, providing the complete address in order to + // locate and identify the pipeline task run.
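+ // + // For example (editor's sketch; jobURL is a hypothetical local holding a URL like the one in the Examples below, recorded via the helper defined later in this file): + // + //	span.SetAttributes(semconv.CICDPipelineTaskRunURLFull(jobURL))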
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. +func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of +// the pipeline run, providing the complete address in order to locate and +// identify the pipeline run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineRunURLFull(val string) attribute.KeyValue { + return CICDPipelineRunURLFullKey.String(val) +} + +// CICDPipelineTaskName returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.name" semantic conventions. It represents the human +// readable name of a task within a pipeline. Task here most closely aligns with +// a [computing process] in a pipeline. Other terms for tasks include commands, +// steps, and procedures. +// +// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) +func CICDPipelineTaskName(val string) attribute.KeyValue { + return CICDPipelineTaskNameKey.String(val) +} + +// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique +// identifier of a task run within a pipeline. +func CICDPipelineTaskRunID(val string) attribute.KeyValue { + return CICDPipelineTaskRunIDKey.String(val) +} + +// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the +// [URL] of the pipeline task run, providing the complete address in order to +// locate and identify the pipeline task run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { + return CICDPipelineTaskRunURLFullKey.String(val) +} + +// CICDSystemComponent returns an attribute KeyValue conforming to the +// "cicd.system.component" semantic conventions. It represents the name of a +// component of the CICD system. +func CICDSystemComponent(val string) attribute.KeyValue { + return CICDSystemComponentKey.String(val) +} + +// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" +// semantic conventions. It represents the unique identifier of a worker within a +// CICD system. +func CICDWorkerID(val string) attribute.KeyValue { + return CICDWorkerIDKey.String(val) +} + +// CICDWorkerName returns an attribute KeyValue conforming to the +// "cicd.worker.name" semantic conventions. It represents the name of a worker +// within a CICD system. +func CICDWorkerName(val string) attribute.KeyValue { + return CICDWorkerNameKey.String(val) +} + +// CICDWorkerURLFull returns an attribute KeyValue conforming to the +// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the +// worker, providing the complete address in order to locate and identify the +// worker. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDWorkerURLFull(val string) attribute.KeyValue { + return CICDWorkerURLFullKey.String(val) +} + +// Enum values for cicd.pipeline.action.name +var ( + // The pipeline run is executing a build. + // Stability: development + CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") + // The pipeline run is executing. + // Stability: development + CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") + // The pipeline run is executing a sync. + // Stability: development + CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") +) + +// Enum values for cicd.pipeline.result +var ( + // The pipeline run finished successfully. + // Stability: development + CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") + // The pipeline run did not finish successfully, e.g.
due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the pipeline run. + // Stability: development + CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") + // The pipeline run failed due to an error in the CICD system, e.g. due to the + // worker being killed. + // Stability: development + CICDPipelineResultError = CICDPipelineResultKey.String("error") + // A timeout caused the pipeline run to be interrupted. + // Stability: development + CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") + // The pipeline run was cancelled, e.g. by a user manually cancelling the + // pipeline run. + // Stability: development + CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") + // The pipeline run was skipped, e.g. due to a precondition not being met. + // Stability: development + CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") +) + +// Enum values for cicd.pipeline.run.state +var ( + // The run pending state spans from the event triggering the pipeline run until + // the execution of the run starts (e.g. time spent in a queue, provisioning + // agents, creating run resources). + // + // Stability: development + CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") + // The executing state spans the execution of any run tasks (e.g. build, test). + // Stability: development + CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") + // The finalizing state spans from when the run has finished executing (e.g. + // cleanup of run resources). + // Stability: development + CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") +) + +// Enum values for cicd.pipeline.task.run.result +var ( + // The task run finished successfully. + // Stability: development + CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") + // The task run did not finish successfully, e.g. due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the task run. + // Stability: development + CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") + // The task run failed due to an error in the CICD system, e.g. due to the worker + // being killed. + // Stability: development + CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") + // A timeout caused the task run to be interrupted. + // Stability: development + CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") + // The task run was cancelled, e.g. by a user manually cancelling the task run. + // Stability: development + CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") + // The task run was skipped, e.g. due to a precondition not being met. + // Stability: development + CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") +) + +// Enum values for cicd.pipeline.task.type +var ( + // build + // Stability: development + CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") + // test + // Stability: development + CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") + // deploy + // Stability: development + CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") +) + +// Enum values for cicd.worker.state +var ( + // The worker is not performing work for the CICD system.
It is available to the + // CICD system to perform work on (online / idle). + // Stability: development + CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") + // The worker is performing work for the CICD system. + // Stability: development + CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") + // The worker is not available to the CICD system (disconnected / down). + // Stability: development + CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") +) + +// Namespace: client +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.address` SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" semantic + // conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.port` SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the "client.address" +// semantic conventions. It represents the client address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// Namespace: cloud +const ( + // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" + // semantic conventions. It represents the cloud account ID the resource is + // assigned to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "111111111111", "opentelemetry" + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. Cloud regions often + // have multiple, isolated locations known as zones to increase availability. + // This attribute represents the zone where the resource is running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-east-1c" + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use.
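+ // + // A resource-level sketch (editor's addition; assumes go.opentelemetry.io/otel/sdk/resource is imported as resource, and uses enum values defined later in this file): + // + //	res := resource.NewWithAttributes(semconv.SchemaURL, + //		semconv.CloudProviderAWS, + //		semconv.CloudPlatformAWSLambda, + //	)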
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic + // conventions. It represents the geographical region within a cloud provider. + // When associated with a resource, this attribute specifies the region where + // the resource operates. When calling services or APIs deployed on a cloud, + // this attribute identifies the region where the called destination is + // deployed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1", "us-east-1" + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions], [AWS regions], [Azure regions], + // [Google Cloud regions], or [Tencent Cloud regions]. + // + // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm + // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ + // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ + // [Google Cloud regions]: https://cloud.google.com/about/locations + // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" + // semantic conventions. It represents the cloud provider-specific native + // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a + // [fully qualified resource ID] on Azure, a [full resource name] on GCP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", + // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", + // "/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>" + // Note: On some cloud providers, it may not be possible to determine the full + // ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute + // and they apply: + // + // - **AWS Lambda:** The function [ARN]. + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix] + // with the resolved function version, as the same runtime instance may be + // invocable with + // multiple different aliases. + // - **GCP:** The [URI of the resource] + // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, + // *not* the function app, having the form + // + // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`. + // This means that a span attribute MUST be used, as an Azure function app + // can host multiple functions that would usually share + // a TracerProvider.
+ // + // + // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id + // [full resource name]: https://google.aip.dev/122#full-resource-names + // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html + // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names + // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id + CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. Cloud regions often have +// multiple, isolated locations known as zones to increase availability; this +// attribute represents the zone where the resource is running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" +// semantic conventions. It represents the geographical region within a cloud +// provider. When associated with a resource, this attribute specifies the region +// where the resource operates. When calling services or APIs deployed on a +// cloud, this attribute identifies the region where the called destination is +// deployed. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] +// on GCP).
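+// +// For example (editor's sketch), for an AWS Lambda function: +// +//	span.SetAttributes(semconv.CloudResourceID("arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"))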
+// +// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id +// [full resource name]: https://google.aip.dev/122#full-resource-names +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Enum values for cloud.platform +var ( + // Alibaba Cloud Elastic Compute Service + // Stability: development + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + // Stability: development + CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + // Stability: development + CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + // Stability: development + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + // Stability: development + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + // Stability: development + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + // Stability: development + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + // Stability: development + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + // Stability: development + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + // Stability: development + CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + // Stability: development + CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm") + // Azure Container Apps + // Stability: development + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps") + // Azure Container Instances + // Stability: development + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances") + // Azure Kubernetes Service + // Stability: development + CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks") + // Azure Functions + // Stability: development + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions") + // Azure App Service + // Stability: development + CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service") + // Azure Red Hat OpenShift + // Stability: development + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift") + // Google Bare Metal Solution (BMS) + // Stability: development + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + // Stability: development + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + // Stability: development + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + // Stability: development + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + // Stability: development + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + // Stability: development + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + // Stability: 
development + CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + // Stability: development + CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") + // Compute on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") + // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") + // Tencent Cloud Cloud Virtual Machine (CVM) + // Stability: development + CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + // Stability: development + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + // Stability: development + CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Enum values for cloud.provider +var ( + // Alibaba Cloud + // Stability: development + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + // Stability: development + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + // Stability: development + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + // Stability: development + CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") + // Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") + // Tencent Cloud + // Stability: development + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// Namespace: cloudevents +const ( + // CloudEventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the [event_id], which + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" + // + // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id + CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudEventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the [source], which + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", + // "my-service" + // + // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 + CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudEventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents specification] which the event uses.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + // + // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion + CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudEventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the [subject] + // of the event in the context of the event producer (identified by source). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: mynewfile.jpg + // + // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject + CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudEventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the [event_type], which + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" + // + // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type + CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudEventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the [event_id], which +// uniquely identifies the event. +// +// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id +func CloudEventsEventID(val string) attribute.KeyValue { + return CloudEventsEventIDKey.String(val) +} + +// CloudEventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the [source], which +// identifies the context in which an event happened. +// +// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 +func CloudEventsEventSource(val string) attribute.KeyValue { + return CloudEventsEventSourceKey.String(val) +} + +// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the +// "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents specification] which the event uses. +// +// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion +func CloudEventsEventSpecVersion(val string) attribute.KeyValue { + return CloudEventsEventSpecVersionKey.String(val) +} + +// CloudEventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the [subject] +// of the event in the context of the event producer (identified by source). +// +// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject +func CloudEventsEventSubject(val string) attribute.KeyValue { + return CloudEventsEventSubjectKey.String(val) +} + +// CloudEventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the [event_type], which +// contains a value describing the type of event related to the originating +// occurrence.
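+// +// For example (editor's sketch): +// +//	span.SetAttributes(semconv.CloudEventsEventType("com.github.pull_request.opened"))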
+// +// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type +func CloudEventsEventType(val string) attribute.KeyValue { + return CloudEventsEventTypeKey.String(val) +} + +// Namespace: cloudfoundry +const ( + // CloudFoundryAppIDKey is the attribute Key conforming to the + // "cloudfoundry.app.id" semantic conventions. It represents the guid of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_id`. This is the same value as + // reported by `cf app --guid`. + CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") + + // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.app.instance.id" semantic conventions. It represents the index + // of the application instance. 0 when just one instance is active. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0", "1" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the application instance index for applications + // deployed on the runtime. + // + // Application instrumentation should use the value from environment + // variable `CF_INSTANCE_INDEX`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") + + // CloudFoundryAppNameKey is the attribute Key conforming to the + // "cloudfoundry.app.name" semantic conventions. It represents the name of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-app-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_name`. This is the same value + // as reported by `cf apps`. + CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") + + // CloudFoundryOrgIDKey is the attribute Key conforming to the + // "cloudfoundry.org.id" semantic conventions. It represents the guid of the + // CloudFoundry org the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_id`. This is the same value as + // reported by `cf org --guid`. + CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") + + // CloudFoundryOrgNameKey is the attribute Key conforming to the + // "cloudfoundry.org.name" semantic conventions. It represents the name of the + // CloudFoundry organization the app is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_name`. This is the same value as + // reported by `cf orgs`. + CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") + + // CloudFoundryProcessIDKey is the attribute Key conforming to the + // "cloudfoundry.process.id" semantic conventions. 
It represents the UID + // identifying the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to + // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. + // For system components, this could be the actual PID. + CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") + + // CloudFoundryProcessTypeKey is the attribute Key conforming to the + // "cloudfoundry.process.type" semantic conventions. It represents the type of + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web" + // Note: CloudFoundry applications can consist of multiple jobs. Usually the + // main process will be of type `web`. There can be additional background + // tasks or side-cars with different process types. + CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") + + // CloudFoundrySpaceIDKey is the attribute Key conforming to the + // "cloudfoundry.space.id" semantic conventions. It represents the guid of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_id`. This is the same value as + // reported by `cf space --guid`. + CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") + + // CloudFoundrySpaceNameKey is the attribute Key conforming to the + // "cloudfoundry.space.name" semantic conventions. It represents the name of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-space-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_name`. This is the same value as + // reported by `cf spaces`. + CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") + + // CloudFoundrySystemIDKey is the attribute Key conforming to the + // "cloudfoundry.system.id" semantic conventions. It represents a guid or + // another name describing the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cf/gorouter" + // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the component name, e.g. "gorouter", for + // CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.id` should be set to + // `spec.deployment/spec.name`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") + + // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid + // describing the concrete instance of the event source. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the vm id for CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.instance.id` should be set to `spec.id`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") +) + +// CloudFoundryAppID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.id" semantic conventions. It represents the guid of the +// application. +func CloudFoundryAppID(val string) attribute.KeyValue { + return CloudFoundryAppIDKey.String(val) +} + +// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.instance.id" semantic conventions. It represents the index +// of the application instance. 0 when just one instance is active. +func CloudFoundryAppInstanceID(val string) attribute.KeyValue { + return CloudFoundryAppInstanceIDKey.String(val) +} + +// CloudFoundryAppName returns an attribute KeyValue conforming to the +// "cloudfoundry.app.name" semantic conventions. It represents the name of the +// application. +func CloudFoundryAppName(val string) attribute.KeyValue { + return CloudFoundryAppNameKey.String(val) +} + +// CloudFoundryOrgID returns an attribute KeyValue conforming to the +// "cloudfoundry.org.id" semantic conventions. It represents the guid of the +// CloudFoundry org the application is running in. +func CloudFoundryOrgID(val string) attribute.KeyValue { + return CloudFoundryOrgIDKey.String(val) +} + +// CloudFoundryOrgName returns an attribute KeyValue conforming to the +// "cloudfoundry.org.name" semantic conventions. It represents the name of the +// CloudFoundry organization the app is running in. +func CloudFoundryOrgName(val string) attribute.KeyValue { + return CloudFoundryOrgNameKey.String(val) +} + +// CloudFoundryProcessID returns an attribute KeyValue conforming to the +// "cloudfoundry.process.id" semantic conventions. It represents the UID +// identifying the process. +func CloudFoundryProcessID(val string) attribute.KeyValue { + return CloudFoundryProcessIDKey.String(val) +} + +// CloudFoundryProcessType returns an attribute KeyValue conforming to the +// "cloudfoundry.process.type" semantic conventions. It represents the type of +// process. +func CloudFoundryProcessType(val string) attribute.KeyValue { + return CloudFoundryProcessTypeKey.String(val) +} + +// CloudFoundrySpaceID returns an attribute KeyValue conforming to the +// "cloudfoundry.space.id" semantic conventions. It represents the guid of the +// CloudFoundry space the application is running in. +func CloudFoundrySpaceID(val string) attribute.KeyValue { + return CloudFoundrySpaceIDKey.String(val) +} + +// CloudFoundrySpaceName returns an attribute KeyValue conforming to the +// "cloudfoundry.space.name" semantic conventions. It represents the name of the +// CloudFoundry space the application is running in. 
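+// +// For example (editor's sketch, using the example value above): +// +//	span.SetAttributes(semconv.CloudFoundrySpaceName("my-space-name"))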
+func CloudFoundrySpaceName(val string) attribute.KeyValue { + return CloudFoundrySpaceNameKey.String(val) +} + +// CloudFoundrySystemID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.id" semantic conventions. It represents a guid or another +// name describing the event source. +func CloudFoundrySystemID(val string) attribute.KeyValue { + return CloudFoundrySystemIDKey.String(val) +} + +// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid +// describing the concrete instance of the event source. +func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { + return CloudFoundrySystemInstanceIDKey.String(val) +} + +// Namespace: code +const ( + // CodeColumnNumberKey is the attribute Key conforming to the + // "code.column.number" semantic conventions. It represents the column number in + // `code.file.path` best representing the operation. It SHOULD point within the + // code unit named in `code.function.name`. This attribute MUST NOT be used on + // the Profile signal since the data is already captured in 'message Line'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeColumnNumberKey = attribute.Key("code.column.number") + + // CodeFilePathKey is the attribute Key conforming to the "code.file.path" + // semantic conventions. It represents the source code file name that identifies + // the code unit as uniquely as possible (preferably an absolute file path). + // This attribute MUST NOT be used on the Profile signal since the data is + // already captured in 'message Function'. This constraint is imposed to prevent + // redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: /usr/local/MyApplication/content_root/app/index.php + CodeFilePathKey = attribute.Key("code.file.path") + + // CodeFunctionNameKey is the attribute Key conforming to the + // "code.function.name" semantic conventions. It represents the method or + // function fully-qualified name without arguments. The value should fit the + // natural representation of the language runtime, which is also likely the same + // used within `code.stacktrace` attribute value. This attribute MUST NOT be + // used on the Profile signal since the data is already captured in 'message + // Function'. This constraint is imposed to prevent redundancy and maintain data + // integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "com.example.MyHttpService.serveRequest", + // "GuzzleHttp\Client::transfer", "fopen" + // Note: Values and format depends on each language runtime, thus it is + // impossible to provide an exhaustive list of examples. + // The values are usually the same (or prefixes of) the ones found in native + // stack trace representation stored in + // `code.stacktrace` without information on arguments. 
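+ // + // A Go-side sketch (editor's addition) of deriving such a value with the standard runtime package: + // + //	pc, _, _, _ := runtime.Caller(0) + //	span.SetAttributes(semconv.CodeFunctionName(runtime.FuncForPC(pc).Name()))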
+ // + // Examples: + // + // - Java method: `com.example.MyHttpService.serveRequest` + // - Java anonymous class method: `com.mycompany.Main$1.myMethod` + // - Java lambda method: + // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` + // - PHP function: `GuzzleHttp\Client::transfer` + // - Go function: `github.com/my/repo/pkg.foo.func5` + // - Elixir: `OpenTelemetry.Ctx.new` + // - Erlang: `opentelemetry_ctx:new` + // - Rust: `playground::my_module::my_cool_func` + // - C function: `fopen` + CodeFunctionNameKey = attribute.Key("code.function.name") + + // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" + // semantic conventions. It represents the line number in `code.file.path` best + // representing the operation. It SHOULD point within the code unit named in + // `code.function.name`. This attribute MUST NOT be used on the Profile signal + // since the data is already captured in 'message Line'. This constraint is + // imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeLineNumberKey = attribute.Key("code.line.number") + + // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" + // semantic conventions. It represents a stacktrace as a string in the natural + // representation for the language runtime. The representation is identical to + // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile + // signal since the data is already captured in 'message Location'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + // + // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumnNumber returns an attribute KeyValue conforming to the +// "code.column.number" semantic conventions. It represents the column number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeColumnNumber(val int) attribute.KeyValue { + return CodeColumnNumberKey.Int(val) +} + +// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" +// semantic conventions. It represents the source code file name that identifies +// the code unit as uniquely as possible (preferably an absolute file path). This +// attribute MUST NOT be used on the Profile signal since the data is already +// captured in 'message Function'. This constraint is imposed to prevent +// redundancy and maintain data integrity. +func CodeFilePath(val string) attribute.KeyValue { + return CodeFilePathKey.String(val) +} + +// CodeFunctionName returns an attribute KeyValue conforming to the +// "code.function.name" semantic conventions. It represents the method or +// function fully-qualified name without arguments. 
The value should fit the
+// natural representation of the language runtime, which is also likely the same
+// used within `code.stacktrace` attribute value. This attribute MUST NOT be used
+// on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+ return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
+// Namespace: container
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used to
+ // run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol"
+ // Note: If using embedded credentials or sensitive data, it is recommended to
+ // remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol", "--config", "config.yaml"
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full command
+ // run by the container as a single string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol --config config.yaml"
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCSIPluginNameKey is the attribute Key conforming to the
+ // "container.csi.plugin.name" semantic conventions. It represents the name of
+ // the CSI ([Container Storage Interface]) plugin used by the volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pd.csi.storage.gke.io"
+ // Note: This can sometimes be referred to as a "driver" in CSI implementations.
+ // This should represent the `name` field of the GetPluginInfo RPC.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+ // ContainerCSIVolumeIDKey is the attribute Key conforming to the
+ // "container.csi.volume.id" semantic conventions. It represents the unique
+ // volume ID returned by the CSI ([Container Storage Interface]) plugin.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+ // Note: This can sometimes be referred to as a "volume handle" in CSI
+ // implementations. This should represent the `Volume.volume_id` field in CSI
+ // spec.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+ // conventions. It represents the container ID. Usually a UUID, as for example
+ // used to [identify Docker containers]. The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a3bf90e006b2"
+ //
+ // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime specific
+ // image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect [API]
+ // endpoint.
+ // K8s defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ //
+ // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of the
+ // image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gcr.io/opentelemetry/operator"
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the repo
+ // digests of the container image as provided by the container runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ // Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+ //
+ // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image Inspect]. Should be only
+ // the `<tag>` section of the full name, for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v1.27.1", "3.5.7-0"
+ //
+ // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-autoconf"
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeDescriptionKey is the attribute Key conforming to the
+ // "container.runtime.description" semantic conventions. It represents a
+ // description about the runtime which could include, for example, details about
+ // the CRI/API version being used or other customisations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker://19.3.1 - CRI: 1.22.0"
+ ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description")
+
+ // ContainerRuntimeNameKey is the attribute Key conforming to the
+ // "container.runtime.name" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker", "containerd", "rkt"
+ ContainerRuntimeNameKey = attribute.Key("container.runtime.name")
+
+ // ContainerRuntimeVersionKey is the attribute Key conforming to the
+ // "container.runtime.version" semantic conventions. It represents the version
+ // of the runtime of this process, as returned by the runtime without
+ // modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0.0
+ ContainerRuntimeVersionKey = attribute.Key("container.runtime.version")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions.
It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name, for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
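+
+// Example (illustrative sketch, not part of the generated conventions): the
+// constructors above are typically attached to a resource describing the
+// container, and ContainerLabel below builds the templated
+// "container.label.<key>" attribute. Assumes go.opentelemetry.io/otel/sdk/resource
+// is imported; ctx and the values are hypothetical.
+//
+//	res, _ := resource.New(ctx,
+//		resource.WithAttributes(
+//			ContainerName("opentelemetry-autoconf"),
+//			ContainerImageName("gcr.io/opentelemetry/operator"),
+//			ContainerImageTags("v1.27.1"),
+//			ContainerLabel("app", "my-app"), // expands to container.label.app
+//		),
+//	)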
+
+// ContainerLabel returns an attribute KeyValue conforming to the
+// "container.label" semantic conventions. It represents the container labels,
+// `<key>` being the label name, the value being the label value.
+func ContainerLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("container.label."+key, val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntimeDescription returns an attribute KeyValue conforming to the
+// "container.runtime.description" semantic conventions. It represents a
+// description about the runtime which could include, for example, details about
+// the CRI/API version being used or other customisations.
+func ContainerRuntimeDescription(val string) attribute.KeyValue {
+ return ContainerRuntimeDescriptionKey.String(val)
+}
+
+// ContainerRuntimeName returns an attribute KeyValue conforming to the
+// "container.runtime.name" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntimeName(val string) attribute.KeyValue {
+ return ContainerRuntimeNameKey.String(val)
+}
+
+// ContainerRuntimeVersion returns an attribute KeyValue conforming to the
+// "container.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ContainerRuntimeVersion(val string) attribute.KeyValue {
+ return ContainerRuntimeVersionKey.String(val)
+}
+
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ CPULogicalNumberKey = attribute.Key("cpu.logical_number")
+
+ // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic
+ // conventions. It represents the mode of the CPU.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "user", "system"
+ CPUModeKey = attribute.Key("cpu.mode")
+)
+
+// CPULogicalNumber returns an attribute KeyValue conforming to the
+// "cpu.logical_number" semantic conventions. It represents the logical CPU
+// number [0..n-1].
+func CPULogicalNumber(val int) attribute.KeyValue {
+ return CPULogicalNumberKey.Int(val)
+}
+
+// Enum values for cpu.mode
+var (
+ // User
+ // Stability: development
+ CPUModeUser = CPUModeKey.String("user")
+ // System
+ // Stability: development
+ CPUModeSystem = CPUModeKey.String("system")
+ // Nice
+ // Stability: development
+ CPUModeNice = CPUModeKey.String("nice")
+ // Idle
+ // Stability: development
+ CPUModeIdle = CPUModeKey.String("idle")
+ // IO Wait
+ // Stability: development
+ CPUModeIOWait = CPUModeKey.String("iowait")
+ // Interrupt
+ // Stability: development
+ CPUModeInterrupt = CPUModeKey.String("interrupt")
+ // Steal
+ // Stability: development
+ CPUModeSteal = CPUModeKey.String("steal")
+ // Kernel
+ // Stability: development
+ CPUModeKernel = CPUModeKey.String("kernel")
+)
+
+// Namespace: db
+const (
+ // DBClientConnectionPoolNameKey is the attribute Key conforming to the
+ // "db.client.connection.pool.name" semantic conventions. It represents the name
+ // of the connection pool; unique within the instrumented application.
In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "findAndModify", "HMSET", "SELECT"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The operation name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple operations
+ // in non-batch operations.
+ //
+ // If spaces can occur in the operation name, multiple consecutive spaces
+ // SHOULD be normalized to a single space.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // operation name
+ // then that operation name SHOULD be used prepended by `BATCH `,
+ // otherwise `db.operation.name` SHOULD be `BATCH` or some other database
+ // system specific term if more applicable.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary"
+ // semantic conventions. It represents the low cardinality summary of a database
+ // query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get
+ // user by id"
+ // Note: The query summary describes a class of database queries and is useful
+ // as a grouping key, especially when analyzing telemetry for database
+ // calls involving complex queries.
+ //
+ // Summary may be available to the instrumentation through
+ // instrumentation hooks or other means. If it is not available,
+ // instrumentations that support query parsing SHOULD generate a summary
+ // following the [Generating query summary] section.
+ //
+ // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query
+ DBQuerySummaryKey = attribute.Key("db.query.summary")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?"
+ // Note: For sanitization see [Sanitization of `db.query.text`].
+ // For batch operations, if the individual operations are known to have the same
+ // query text then that query text SHOULD be used, otherwise all of the
+ // individual query texts SHOULD be concatenated with separator `; ` or some
+ // other database system specific separator if more applicable.
+ // Parameterized query text SHOULD NOT be sanitized. Even though parameterized
+ // query text can potentially have sensitive data, by using a parameterized
+ // query the user is giving a strong signal that any sensitive data will be
+ // passed as parameter values, and the benefit to observability of capturing the
+ // static part of the query text by default outweighs the risk.
+ //
+ // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBResponseReturnedRowsKey is the attribute Key conforming to the
+ // "db.response.returned_rows" semantic conventions. It represents the number of
+ // rows returned by the operation.
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. + DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. 
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationBatchSize returns an attribute KeyValue conforming to the
+// "db.operation.batch.size" semantic conventions. It represents the number of
+// queries included in a batch operation.
+func DBOperationBatchSize(val int) attribute.KeyValue {
+ return DBOperationBatchSizeKey.Int(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBOperationParameter returns an attribute KeyValue conforming to the
+// "db.operation.parameter" semantic conventions. It represents a database
+// operation parameter, with `<key>` being the parameter name, and the attribute
+// value being a string representation of the parameter value.
+func DBOperationParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.operation.parameter."+key, val)
+}
+
+// DBQueryParameter returns an attribute KeyValue conforming to the
+// "db.query.parameter" semantic conventions. It represents a database query
+// parameter, with `<key>` being the parameter name, and the attribute value
+// being a string representation of the parameter value.
+func DBQueryParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.query.parameter."+key, val)
+}
+
+// DBQuerySummary returns an attribute KeyValue conforming to the
+// "db.query.summary" semantic conventions. It represents the low cardinality
+// summary of a database query.
+func DBQuerySummary(val string) attribute.KeyValue {
+ return DBQuerySummaryKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the "db.query.text"
+// semantic conventions. It represents the database query being executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
+
+// DBResponseReturnedRows returns an attribute KeyValue conforming to the
+// "db.response.returned_rows" semantic conventions. It represents the number of
+// rows returned by the operation.
+func DBResponseReturnedRows(val int) attribute.KeyValue {
+ return DBResponseReturnedRowsKey.Int(val)
+}
+
+// DBResponseStatusCode returns an attribute KeyValue conforming to the
+// "db.response.status_code" semantic conventions. It represents the database
+// response status code.
+func DBResponseStatusCode(val string) attribute.KeyValue {
+ return DBResponseStatusCodeKey.String(val)
+}
+
+// DBStoredProcedureName returns an attribute KeyValue conforming to the
+// "db.stored_procedure.name" semantic conventions. It represents the name of a
+// stored procedure within the database.
+func DBStoredProcedureName(val string) attribute.KeyValue {
+ return DBStoredProcedureNameKey.String(val)
+}
+
+// Enum values for db.client.connection.state
+var (
+ // idle
+ // Stability: development
+ DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle")
+ // used
+ // Stability: development
+ DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used")
+)
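+
+// Example (illustrative sketch, not part of the generated conventions):
+// annotating a database client span with the constructors above. The span and
+// values are hypothetical; the parameterized query text is recorded as-is,
+// since the `db.query.text` note recommends against sanitizing parameterized
+// queries.
+//
+//	span.SetAttributes(
+//		DBSystemNamePostgreSQL,
+//		DBNamespace("customers"),
+//		DBCollectionName("public.users"),
+//		DBOperationName("SELECT"),
+//		DBQueryText("SELECT * FROM wuser_table where username = ?"),
+//		DBQueryParameter("1", "jdoe"), // expands to db.query.parameter.1
+//	)
+
+// Enum values for db.system.name
+var (
+ // Some other SQL database. Fallback only.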
+ // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // + // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = 
DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // [SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. +func DeploymentName(val string) attribute.KeyValue { + return DeploymentNameKey.String(val) +} + +// Enum values for deployment.status +var ( + // failed + // Stability: development + DeploymentStatusFailed = DeploymentStatusKey.String("failed") + // succeeded + // Stability: development + DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") +) + +// Namespace: destination +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the destination + // address - domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the source side, and when communicating through an
+ // intermediary, `destination.address` SHOULD represent the destination address
+ // behind any intermediaries, for example proxies, if it's available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the "destination.port"
+ // semantic conventions. It represents the destination port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
+// Namespace: device
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123456789012345", "01:23:45:67:89:AB"
+ // Note: Its value SHOULD be identical for all apps on a device and it SHOULD
+ // NOT change if an app is uninstalled and re-installed.
+ // However, it might be resettable by the user for all apps on a device.
+ // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
+ // used as values.
+ //
+ // More information about Android identifier best practices can be found in the
+ // [Android user data IDs guide].
+ //
+ // > [!WARNING]
+ // > This attribute may contain sensitive (PII) information. Caution should be
+ // > taken when storing personal data or anything which can identify a user.
+ // > GDPR and data protection laws may apply, ensure you do your own due
+ // > diligence.
+ // >
+ // > Due to these reasons, this identifier is not recommended for consumer
+ // > applications and will likely result in rejection from both Google Play and
+ // > App Store. However, it may be appropriate for specific enterprise
+ // > scenarios, such as kiosk devices or enterprise-managed devices, with
+ // > appropriate compliance clearance.
+ // >
+ // > Any instrumentation providing this identifier MUST implement it as an
+ // > opt-in feature.
+ // >
+ // > See [`app.installation.id`] for a more privacy-preserving alternative.
+ //
+ // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
+ // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of the
+ // device manufacturer.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apple", "Samsung" + // Note: The Android OS provides this field via [Build]. iOS apps SHOULD + // hardcode the value `Apple`. + // + // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone3,4", "SM-G920F" + // Note: It's recommended this value represents a machine-readable version of + // the model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" + // semantic conventions. It represents the marketing name for the device model. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic +// conventions. It represents a unique identifier representing the device. +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer. +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device. +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name for +// the device model. +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// Namespace: disk +const ( + // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" + // semantic conventions. It represents the disk IO operation direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "read" + DiskIODirectionKey = attribute.Key("disk.io.direction") +) + +// Enum values for disk.io.direction +var ( + // read + // Stability: development + DiskIODirectionRead = DiskIODirectionKey.String("read") + // write + // Stability: development + DiskIODirectionWrite = DiskIODirectionKey.String("write") +) + +// Namespace: dns +const ( + // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic + // conventions. It represents the list of IPv4 or IPv6 addresses resolved during + // DNS lookup. 
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ DNSAnswersKey = attribute.Key("dns.answers")
+
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: If the name field contains non-printable characters (below 32 or above
+ // 126), those characters should be represented as escaped base 10 integers
+ // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns,
+ // and line feeds should be converted to \t, \r, and \n respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers"
+// semantic conventions. It represents the list of IPv4 or IPv6 addresses
+// resolved during DNS lookup.
+func DNSAnswers(val ...string) attribute.KeyValue {
+ return DNSAnswersKey.StringSlice(val)
+}
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
+
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a request was routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
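+
+// Example (illustrative sketch, not part of the generated conventions): because
+// `enduser.id` carries PII, instrumentation that only needs a stable user
+// handle can record the pseudonymous identifier instead. The span and value are
+// hypothetical.
+//
+//	span.SetAttributes(EnduserPseudoID("QdH5CAWJgqVT4rOr0qtumf"))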
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It describes a class of error the operation ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications should be prepared for `error.type` to have high
+ // cardinality at query time when no additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // - Use a domain-specific attribute
+ // - Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
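+
+// Example (illustrative sketch, not part of the generated conventions):
+// recording error.type per the note above, using a low-cardinality value when
+// the error class is known and the _OTHER fallback otherwise. The span and err
+// are hypothetical; assumes the context and errors packages are imported.
+//
+//	errType := ErrorTypeOther
+//	if errors.Is(err, context.DeadlineExceeded) {
+//		errType = ErrorTypeKey.String("timeout")
+//	}
+//	span.SetAttributes(errType, ErrorMessage(err.Error()))
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.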
+ // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? 
*
+	//
+	// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+	FaaSCronKey = attribute.Key("faas.cron")
+
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name of
+	// the source on which the triggering operation was performed. For example, in
+	// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to
+	// the database name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "myBucketName", "myDbName"
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or S3
+	// this is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "myFile.txt", "myTableName"
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It describes the type of the
+	// operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string containing
+	// the time when the data was accessed in the [ISO 8601] format expressed in
+	// [UTC].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 2020-01-23T13:47:06Z
+	//
+	// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+	// [UTC]: https://www.w3.org/TR/NOTE-datetime
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, which will potentially be reused for other invocations of the same
+	// function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+	// Note:
+	//   - **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation ID
+	// of the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+	// FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+	// semantic conventions. It represents the name of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: my-function
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+	// function.
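+	//
+	// Illustrative only; `span` is a hypothetical trace.Span variable, not
+	// part of this package:
+	//
+	//	span.SetAttributes(FaaSInvokedNameKey.String("my-function"))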
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud region
+	// of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: eu-central-1
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
+	// invoked function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+	// semantic conventions. It represents the amount of memory available to the
+	// serverless function converted to bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: It's recommended to set this attribute since e.g. too little memory can
+	// easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+	// the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+	// information (which must be multiplied by 1,048,576).
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this runtime
+	// instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-function", "myazurefunctionapp/some-function-name"
+	// Note: This is the name of the function as configured/deployed on the FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function.name`]
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud providers/products:
+	//
+	//   - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., the function app name
+	//     followed by a forward slash followed by the function name (this form
+	//     can also be seen in the resource JSON for the function).
+	//     This means that a span attribute MUST be used, as an Azure function
+	//     app can host multiple functions that would usually share
+	//     a TracerProvider (see also the `cloud.resource_id` attribute).
+	//
+	// [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation time
+	// in the [ISO 8601] format expressed in [UTC].
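+	//
+	// A hypothetical way to produce such a value (the time import is assumed,
+	// not shown here):
+	//
+	//	FaaSTimeKey.String(time.Now().UTC().Format(time.RFC3339))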
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 2020-01-23T13:47:06Z
+	//
+	// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+	// [UTC]: https://www.w3.org/TR/NOTE-datetime
+	FaaSTimeKey = attribute.Key("faas.time")
+
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic
+	// conventions. It represents the type of the trigger which caused this function
+	// invocation.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic
+	// conventions. It represents the immutable version of the function being
+	// executed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "26", "pinkfroid-00002"
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	//   - **AWS Lambda:** The [function version]
+	//     (an integer represented as a decimal string).
+	//   - **Google Cloud Run (Services):** The [revision]
+	//     (i.e., the function name plus the revision suffix).
+	//   - **Google Cloud Functions:** The value of the
+	//     [`K_REVISION` environment variable].
+	//   - **Azure Functions:** Not applicable. Do not set this attribute.
+	//
+	// [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html
+	// [revision]: https://cloud.google.com/run/docs/managing/revisions
+	// [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically
+	FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart"
+// semantic conventions. It represents a boolean that is true if the serverless
+// function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+	return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic
+// conventions. It represents a string containing the schedule period as
+// [Cron Expression].
+//
+// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+func FaaSCron(val string) attribute.KeyValue {
+	return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of the
+// source on which the triggering operation was performed. For example, in Cloud
+// Storage or S3 this corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+	return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+	return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO 8601] format expressed in
+// [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSDocumentTime(val string) attribute.KeyValue {
+	return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance"
+// semantic conventions. It represents the execution environment ID as a string,
+// which will potentially be reused for other invocations of the same
+// function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID of
+// the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region of
+// the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic
+// conventions. It represents the name of the single function that this runtime
+// instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic
+// conventions. It represents a string containing the function invocation time in
+// the [ISO 8601] format expressed in [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSTime(val string) attribute.KeyValue {
+	return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the "faas.version"
+// semantic conventions. It represents the immutable version of the function
+// being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
+// Enum values for faas.document.operation
+var (
+	// When a new object is created.
+	// Stability: development
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified.
+	// Stability: development
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted.
+	// Stability: development
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+	// Alibaba Cloud
+	// Stability: development
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	// Stability: development
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	// Stability: development
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	// Stability: development
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	// Stability: development
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+	// A response to some data source operation such as a database or filesystem
+	// read/write.
+	// Stability: development
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request.
+	// Stability: development
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging
+	// system.
+	// Stability: development
+	FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly.
+	// Stability: development
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply.
+	// Stability: development
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Namespace: feature_flag
+const (
+	// FeatureFlagContextIDKey is the attribute Key conforming to the
+	// "feature_flag.context.id" semantic conventions. It represents the unique
+	// identifier for the flag evaluation context. For example, the targeting key.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Release_Candidate
+	//
+	// Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+	FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+	// FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+	// semantic conventions. It represents the lookup key of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Release_Candidate
+	//
+	// Examples: "logo-color"
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider.name" semantic conventions. It identifies the feature
+	// flag provider.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Release_Candidate
+	//
+	// Examples: "Flag Manager"
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+	// FeatureFlagResultReasonKey is the attribute Key conforming to the
+	// "feature_flag.result.reason" semantic conventions. It represents the reason
+	// code which shows how a feature flag value was determined.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Release_Candidate
+	//
+	// Examples: "static", "targeting_match", "error", "default"
+	FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+	// FeatureFlagResultValueKey is the attribute Key conforming to the
+	// "feature_flag.result.value" semantic conventions. It represents the evaluated
+	// value of the feature flag.
+	//
+	// Type: any
+	// RequirementLevel: Recommended
+	// Stability: Release_Candidate
+	//
+	// Examples: "#ff0000", true, 3
+	// Note: With some feature flag providers, feature flag results can be quite
+	// large or contain private or sensitive details.
+	// Because of this, `feature_flag.result.variant` is often the preferred
+	// attribute if it is available.
+	//
+	// It may be desirable to redact or otherwise limit the size and scope of
+	// `feature_flag.result.value` if possible.
+	// Because the evaluated flag value is unstructured and may be any type, it is
+	// left to the instrumentation author to determine how best to achieve this.
+	FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+	// FeatureFlagResultVariantKey is the attribute Key conforming to the
+	// "feature_flag.result.variant" semantic conventions. It represents a semantic
+	// identifier for an evaluated flag value.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Release_Candidate
+	//
+	// Examples: "red", "true", "on"
+	// Note: A semantic identifier, commonly referred to as a variant, provides a
+	// means for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+	// FeatureFlagSetIDKey is the attribute Key conforming to the
+	// "feature_flag.set.id" semantic conventions. It represents the identifier of
+	// the [flag set] to which the feature flag belongs.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Release_Candidate
+	//
+	// Examples: "proj-1", "ab98sgs", "service1/dev"
+	//
+	// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+	FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+	// FeatureFlagVersionKey is the attribute Key conforming to the
+	// "feature_flag.version" semantic conventions. It represents the version of the
+	// ruleset used during the evaluation. This may be any stable value which
+	// uniquely identifies the ruleset.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Release_Candidate
+	//
+	// Examples: "1", "01ABCDEF"
+	FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+	return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It identifies the feature
+// flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
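+//
+// A minimal, hypothetical sketch; `span` stands in for a trace.Span and is
+// not part of this package:
+//
+//	span.SetAttributes(FeatureFlagResultVariant("red"))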
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+	return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+	return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+	return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+	// The resolved value is static (no dynamic evaluation).
+	// Stability: release_candidate
+	FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+	// The resolved value fell back to a pre-configured value (no dynamic evaluation
+	// occurred or dynamic evaluation yielded no result).
+	// Stability: release_candidate
+	FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+	// The resolved value was the result of a dynamic evaluation, such as a rule or
+	// specific user-targeting.
+	// Stability: release_candidate
+	FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+	// The resolved value was the result of pseudorandom assignment.
+	// Stability: release_candidate
+	FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+	// The resolved value was retrieved from cache.
+	// Stability: release_candidate
+	FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+	// The resolved value was the result of the flag being disabled in the
+	// management system.
+	// Stability: release_candidate
+	FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+	// The reason for the resolved value could not be determined.
+	// Stability: release_candidate
+	FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+	// The resolved value is non-authoritative or possibly out of date.
+	// Stability: release_candidate
+	FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+	// The resolved value was the result of an error.
+	// Stability: release_candidate
+	FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
+
+// Namespace: file
+const (
+	// FileAccessedKey is the attribute Key conforming to the "file.accessed"
+	// semantic conventions. It represents the time when the file was last accessed,
+	// in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: This attribute might not be supported by some file systems (NFS,
+	// FAT32, in embedded OS, etc.).
+	FileAccessedKey = attribute.Key("file.accessed")
+
+	// FileAttributesKey is the attribute Key conforming to the "file.attributes"
+	// semantic conventions. It represents the array of file attributes.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "readonly", "hidden"
+	// Note: Attribute names depend on the OS or file system.
Here’s a + // non-exhaustive list of values expected for this attribute: `archive`, + // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, + // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, + // `write`. + FileAttributesKey = attribute.Key("file.attributes") + + // FileChangedKey is the attribute Key conforming to the "file.changed" semantic + // conventions. It represents the time when the file attributes or metadata was + // last changed, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: `file.changed` captures the time when any of the file's properties or + // attributes (including the content) are changed, while `file.modified` + // captures the timestamp when the file content is modified. + FileChangedKey = attribute.Key("file.changed") + + // FileCreatedKey is the attribute Key conforming to the "file.created" semantic + // conventions. It represents the time when the file was created, in ISO 8601 + // format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileCreatedKey = attribute.Key("file.created") + + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is located. + // It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/user", "C:\Program Files\MyApp" + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the leading + // dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") + + // FileForkNameKey is the attribute Key conforming to the "file.fork_name" + // semantic conventions. It represents the name of the fork. A fork is + // additional data associated with a filesystem object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Zone.Identifier" + // Note: On Linux, a resource fork is used to store additional data with a + // filesystem object. A file always has at least one fork for the data portion, + // and additional forks may exist. + // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default + // data stream for a file is just called $DATA. Zone.Identifier is commonly used + // by Windows to track contents downloaded from the Internet. An ADS is + // typically of the form: C:\path\to\filename.extension:some_fork_name, and + // some_fork_name is the value that should populate `fork_name`. + // `filename.extension` should populate `file.name`, and `extension` should + // populate `file.extension`. The full path, `file.path`, will include the fork + // name. + FileForkNameKey = attribute.Key("file.fork_name") + + // FileGroupIDKey is the attribute Key conforming to the "file.group.id" + // semantic conventions. 
It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. 
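+	//
+	// For illustration only (os.Readlink usage assumed; error handling
+	// omitted):
+	//
+	//	target, _ := os.Readlink("/usr/bin/python3")
+	//	attr := FileSymbolicLinkTargetPathKey.String(target)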
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. +func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. +func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. 
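+//
+// A hypothetical sketch of producing the value from an os.FileInfo (the
+// `info` variable and the os/time imports are assumed, not shown here):
+//
+//	FileModified(info.ModTime().UTC().Format(time.RFC3339))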
+func FileModified(val string) attribute.KeyValue {
+	return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+	return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+	return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+	return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+	return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+	return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+	return FileSymbolicLinkTargetPathKey.String(val)
+}
+
+// Namespace: gcp
+const (
+	// GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+	// "gcp.apphub.application.container" semantic conventions. It represents the
+	// container within GCP where the AppHub application is defined.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "projects/my-container-project"
+	GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+	// GCPAppHubApplicationIDKey is the attribute Key conforming to the
+	// "gcp.apphub.application.id" semantic conventions. It represents the name of
+	// the application as configured in AppHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-application"
+	GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+	// GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+	// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+	// zone or region where the application is defined.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-central1"
+	GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+	// GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+	// criticality of a service, which indicates its importance to the business.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub type enum]
+	//
+	// [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+	GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+	// GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.service.environment_type" semantic conventions. It represents the
+	// environment of a service, i.e., the stage of a software lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub environment type]
+	//
+	// [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+	GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+	// GCPAppHubServiceIDKey is the attribute Key conforming to the
+	// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+	// service as configured in AppHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-service"
+	GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+	// GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+	// the criticality of a workload, which indicates its importance to the
+	// business.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub type enum]
+	//
+	// [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+	GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+	// GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.workload.environment_type" semantic conventions. It represents
+	// the environment of a workload, i.e., the stage of a software lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub environment type]
+	//
+	// [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+	GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+	// GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+	// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+	// workload as configured in AppHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-workload"
+	GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+	// GCPClientServiceKey is the attribute Key conforming to the
+	// "gcp.client.service" semantic conventions. It identifies the Google Cloud
+	// service for which the official client library is intended.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+	// Note: Intended to be a stable identifier for Google Cloud client libraries
+	// that is uniform across implementation languages. The value should be derived
+	// from the canonical service domain for the service; for example,
+	// 'foo.googleapis.com' should result in a value of 'foo'.
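+	//
+	// A purely illustrative sketch of deriving the value from a canonical
+	// service domain (the strings import is assumed, not shown here):
+	//
+	//	svc, _, _ := strings.Cut("foo.googleapis.com", ".")
+	//	attr := GCPClientServiceKey.String(svc) // attr carries "foo"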
+	GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+	// GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+	// the Cloud Run [execution] being run for the Job, as set by the
+	// [`CLOUD_RUN_EXECUTION`] environment variable.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "job-name-xxxx", "sample-job-mdw84"
+	//
+	// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+	// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+	GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+	// GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+	// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+	// environment variable.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 1
+	//
+	// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+	GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+	// GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+	// of a GCE instance. This is the full value of the default or
+	// [custom hostname].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-host1234.example.com",
+	// "sample-vm.us-west1-b.c.my-project.internal"
+	//
+	// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+	GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+	// GCPGCEInstanceNameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+	// of a GCE instance. This is the value provided by `host.name`, the visible
+	// name of the instance in the Cloud Console UI, and the prefix for the default
+	// hostname of the instance as defined by the [default internal DNS name].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "instance-1", "my-vm-name"
+	//
+	// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+	GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+	return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+	return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
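+//
+// A minimal, hypothetical sketch; `span` stands in for a trace.Span and is
+// not part of this package:
+//
+//	span.SetAttributes(GCPAppHubApplicationLocation("us-central1"))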
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+	return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+	return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+	return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It identifies the Google Cloud
+// service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+	return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+	return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+	return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+	return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+	return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+	// Mission critical service.
+	// Stability: development
+	GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+	// High impact.
+	// Stability: development
+	GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+	// Medium impact.
+ // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. 
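+	//
+	// Illustrative only; `span` is a hypothetical trace.Span variable, not
+	// part of this package:
+	//
+	//	span.SetAttributes(GenAIAgentNameKey.String("Math Tutor"))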
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Math Tutor", "Fiction Writer" + GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") + + // GenAIConversationIDKey is the attribute Key conforming to the + // "gen_ai.conversation.id" semantic conventions. It represents the unique + // identifier for a conversation (session, thread), used to store and correlate + // messages within this conversation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY" + GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id") + + // GenAIDataSourceIDKey is the attribute Key conforming to the + // "gen_ai.data_source.id" semantic conventions. It represents the data source + // identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "H7STPQYOND" + // Note: Data sources are used by AI agents and RAG applications to store + // grounding data. A data source may be an external database, object store, + // document collection, website, or any other storage system used by the GenAI + // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier + // used by the GenAI system rather than a name specific to the external storage, + // such as a database or object store. Semantic conventions referencing + // `gen_ai.data_source.id` MAY also leverage additional attributes, such as + // `db.*`, to further identify and describe the data source. + GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") + + // GenAIInputMessagesKey is the attribute Key conforming to the + // "gen_ai.input.messages" semantic conventions. It represents the chat history + // provided to the model as an input. + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n + // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n + // "parts": [\n {\n "type": "tool_call",\n "id": + // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n + // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n + // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n + // "result": "rainy, 57°F"\n }\n ]\n }\n]\n" + // Note: Instrumentations MUST follow [Input messages JSON schema]. + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Messages MUST be provided in the order they were sent to the model. + // Instrumentations MAY provide a way for users to filter or truncate + // input messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. + // + // See [Recording content on attributes] + // section for more details. + // + // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages") + + // GenAIOperationNameKey is the attribute Key conforming to the + // "gen_ai.operation.name" semantic conventions. It represents the name of the + // operation being performed. 
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: If one of the predefined values applies, but a specific system uses a
+	// different name, it's RECOMMENDED to document it in the semantic conventions
+	// for that specific GenAI system and use the system-specific name in the
+	// instrumentation. If a different name is not documented, instrumentation
+	// libraries SHOULD use the applicable predefined value.
+	GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+	// GenAIOutputMessagesKey is the attribute Key conforming to the
+	// "gen_ai.output.messages" semantic conventions. It represents the messages
+	// returned by the model, where each message represents a specific model
+	// response (choice, candidate).
+	//
+	// Type: any
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n
+	// "content": "The weather in Paris is currently rainy with a temperature of
+	// 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n"
+	// Note: Instrumentations MUST follow the [Output messages JSON schema].
+	//
+	// Each message represents a single output choice/candidate generated by
+	// the model. Each message corresponds to exactly one generation
+	// (choice/candidate) and vice versa: one choice cannot be split across
+	// multiple messages, and one message cannot contain parts from multiple
+	// choices.
+	//
+	// When the attribute is recorded on events, it MUST be recorded in structured
+	// form. When recorded on spans, it MAY be recorded as a JSON string if a
+	// structured format is not supported and SHOULD be recorded in structured
+	// form otherwise.
+	//
+	// Instrumentations MAY provide a way for users to filter or truncate
+	// output messages.
+	//
+	// > [!Warning]
+	// > This attribute is likely to contain sensitive information including
+	// > user/PII data.
+	//
+	// See the [Recording content on attributes]
+	// section for more details.
+	//
+	// [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json
+	// [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+	GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages")
+
+	// GenAIOutputTypeKey is the attribute Key conforming to the
+	// "gen_ai.output.type" semantic conventions. It represents the content type
+	// requested by the client.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This attribute SHOULD be used when the client requests output of a
+	// specific type. The model may return zero or more outputs of this type.
+	// This attribute specifies the output modality and not the actual output
+	// format. For example, if an image is requested, the actual output could be a
+	// URL pointing to an image file.
+	// Additional output format details may be recorded in the future in the
+	// `gen_ai.output.{type}.*` attributes.
+	GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+	// GenAIProviderNameKey is the attribute Key conforming to the
+	// "gen_ai.provider.name" semantic conventions. It represents the Generative AI
+	// provider as identified by the client or server instrumentation.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: The attribute SHOULD be set based on the instrumentation's best
+	// knowledge and may differ from the actual model provider.
+ //
+ // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms
+ // are accessible using the OpenAI REST API and corresponding client libraries,
+ // but may proxy or host models from different providers.
+ //
+ // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address`
+ // attributes may help identify the actual system in use.
+ //
+ // The `gen_ai.provider.name` attribute acts as a discriminator that
+ // identifies the GenAI telemetry format flavor specific to that provider
+ // within GenAI semantic conventions.
+ // It SHOULD be set consistently with provider-specific attributes and signals.
+ // For example, GenAI spans, metrics, and events related to AWS Bedrock
+ // should have the `gen_ai.provider.name` set to `aws.bedrock` and include
+ // applicable `aws.bedrock.*` attributes and are not expected to include
+ // `openai.*` attributes.
+ GenAIProviderNameKey = attribute.Key("gen_ai.provider.name")
+
+ // GenAIRequestChoiceCountKey is the attribute Key conforming to the
+ // "gen_ai.request.choice.count" semantic conventions. It represents the target
+ // number of candidate completions to return.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3
+ GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+ // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+ // "gen_ai.request.encoding_formats" semantic conventions. It represents the
+ // encoding formats requested in an embeddings operation, if specified.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ["base64"], ["float", "binary"]
+ // Note: In some GenAI systems the encoding formats are called embedding types.
+ // Also, some GenAI systems only accept a single format per request.
+ GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+ // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+ // frequency penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+ // GenAIRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+ // number of tokens the model generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAIRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of the
+ // GenAI model a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: gpt-4
+ GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.presence_penalty" semantic conventions. It represents the
+ // presence penalty setting for the GenAI request.
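The request-side keys above all get constructor helpers later in this hunk. A sketch of assembling them for a chat call routed through the OpenAI API (the values are illustrative; the provider enum doubles as the telemetry-flavor discriminator described above):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
)

// chatRequestAttrs assembles the request-side attributes for a chat call.
func chatRequestAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.GenAIProviderNameOpenAI, // gen_ai.provider.name=openai
		semconv.GenAIRequestModel("gpt-4"),
		semconv.GenAIRequestMaxTokens(100),
		semconv.GenAIRequestChoiceCount(3),
		semconv.GenAIRequestFrequencyPenalty(0.1),
	}
}
```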
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+ // GenAIRequestSeedKey is the attribute Key conforming to the
+ // "gen_ai.request.seed" semantic conventions. It represents the seed of the
+ // request; requests with the same seed value are more likely to return the
+ // same result.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+ // GenAIRequestStopSequencesKey is the attribute Key conforming to the
+ // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+ // of sequences that the model will use to stop generating further tokens.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "forest", "lived"
+ GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+ // GenAIRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.0
+ GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAIRequestTopKKey is the attribute Key conforming to the
+ // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+ // GenAIRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to each
+ // generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ["stop"], ["stop", "length"]
+ GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAIResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "chatcmpl-123"
+ GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAIResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of the
+ // model that generated the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4-0613"
+ GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAISystemInstructionsKey is the attribute Key conforming to the
+ // "gen_ai.system_instructions" semantic conventions. It represents the system
+ // message or instructions provided to the GenAI model separately from the chat
+ // history.
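On the response side, the same pattern applies once the model call completes. A sketch, with values mirroring the documented examples (FinishReasons is variadic, one entry per returned choice):

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
	"go.opentelemetry.io/otel/trace"
)

// recordChatResponse copies response metadata onto the span after the call.
func recordChatResponse(span trace.Span) {
	span.SetAttributes(
		semconv.GenAIResponseID("chatcmpl-123"),
		semconv.GenAIResponseModel("gpt-4-0613"),
		semconv.GenAIResponseFinishReasons("stop"),
	)
}
```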
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet
+ // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type":
+ // "text",\n "content": "You are a language translator."\n },\n {\n "type":
+ // "text",\n "content": "Your mission is to translate text in English to
+ // French."\n }\n]\n"
+ // Note: This attribute SHOULD be used when the corresponding provider or API
+ // allows providing system instructions or messages separately from the
+ // chat history.
+ //
+ // Instructions that are part of the chat history SHOULD be recorded in
+ // the `gen_ai.input.messages` attribute instead.
+ //
+ // Instrumentations MUST follow [System instructions JSON schema].
+ //
+ // When recorded on spans, it MAY be recorded as a JSON string if structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // system instructions.
+ //
+ // > [!Warning]
+ // > This attribute may contain sensitive information.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions")
+
+ // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+ // semantic conventions. It represents the type of token being counted.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "input", "output"
+ GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+ // GenAIToolCallIDKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+ GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+ // GenAIToolDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.tool.description" semantic conventions. It represents the tool
+ // description.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Multiply two numbers"
+ GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+ // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+ // semantic conventions. It represents the name of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flights"
+ GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+ // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+ // semantic conventions. It represents the type of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "function", "extension", "datastore"
+ // Note: Extension: A tool executed on the agent-side to directly call external
+ // APIs, bridging the gap between the agent and real-world systems.
+ // Agent-side operations involve actions that are performed by the agent on the
+ // server or within the agent's controlled environment.
+ // Function: A tool executed on the client-side, where the agent generates + // parameters for a predefined function, and the client executes the logic. + // Client-side operations are actions taken on the user's end or within the + // client application. + // Datastore: A tool used by the agent to access and query structured or + // unstructured external data for retrieval-augmented tasks or knowledge + // updates. + GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") + + // GenAIUsageInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of + // tokens used in the GenAI input (prompt). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. +func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. 
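For `execute_tool` spans, the tool.* attributes above combine with the operation-name enum. A sketch describing a client-side function tool (the tool name is a hypothetical example; the call ID mirrors the documented example):

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
	"go.opentelemetry.io/otel/trace"
)

// annotateToolSpan describes a client-side function tool execution.
func annotateToolSpan(span trace.Span) {
	span.SetAttributes(
		semconv.GenAIOperationNameExecuteTool,
		semconv.GenAIToolName("get_weather"), // hypothetical tool
		semconv.GenAIToolType("function"),
		semconv.GenAIToolCallID("call_mszuSIzqtI65i1wAUOE8w5H4"),
	)
}
```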
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+ return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+ return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+ return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+ return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the seed of the
+// request; requests with the same seed value are more likely to return the
+// same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+ return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+ return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+ return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+ return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+ return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.
+func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). 
+func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.provider.name +var ( + // [OpenAI] + // Stability: development + // + // [OpenAI]: https://openai.com/ + GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai") + // [Vertex AI] + // Stability: development + // + // [Vertex AI]: https://cloud.google.com/vertex-ai + GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai") + // [Gemini] + // Stability: development + // + // [Gemini]: https://cloud.google.com/products/gemini + GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini") + // [Anthropic] + // Stability: development + // + // [Anthropic]: https://www.anthropic.com/ + GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic") + // [Cohere] + // Stability: development + // + // [Cohere]: https://cohere.com/ + GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference") + // [Azure OpenAI] + // Stability: development + // + // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ + GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai") + // [IBM Watsonx AI] + // Stability: development + // + // [IBM Watsonx AI]: 
https://www.ibm.com/products/watsonx-ai
+ GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai")
+ // [AWS Bedrock]
+ // Stability: development
+ //
+ // [AWS Bedrock]: https://aws.amazon.com/bedrock
+ GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock")
+ // [Perplexity]
+ // Stability: development
+ //
+ // [Perplexity]: https://www.perplexity.ai/
+ GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity")
+ // [xAI]
+ // Stability: development
+ //
+ // [xAI]: https://x.ai/
+ GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai")
+ // [DeepSeek]
+ // Stability: development
+ //
+ // [DeepSeek]: https://www.deepseek.com/
+ GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek")
+ // [Groq]
+ // Stability: development
+ //
+ // [Groq]: https://groq.com/
+ GenAIProviderNameGroq = GenAIProviderNameKey.String("groq")
+ // [Mistral AI]
+ // Stability: development
+ //
+ // [Mistral AI]: https://mistral.ai/
+ GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai")
+)
+
+// Enum values for gen_ai.token.type
+var (
+ // Input tokens (prompt, input, etc.)
+ // Stability: development
+ GenAITokenTypeInput = GenAITokenTypeKey.String("input")
+ // Output tokens (completion, response, etc.)
+ // Stability: development
+ GenAITokenTypeOutput = GenAITokenTypeKey.String("output")
+)
+
+// Namespace: geo
+const (
+ // GeoContinentCodeKey is the attribute Key conforming to the
+ // "geo.continent.code" semantic conventions. It represents the two-letter code
+ // representing the continent’s name.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ GeoContinentCodeKey = attribute.Key("geo.continent.code")
+
+ // GeoCountryISOCodeKey is the attribute Key conforming to the
+ // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+ // Country Code ([ISO 3166-1 alpha2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA"
+ //
+ // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+ GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code")
+
+ // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name"
+ // semantic conventions. It represents the locality name. Represents the name of
+ // a city, town, village, or similar populated place.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Montreal", "Berlin"
+ GeoLocalityNameKey = attribute.Key("geo.locality.name")
+
+ // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat"
+ // semantic conventions. It represents the latitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 45.505918
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLatKey = attribute.Key("geo.location.lat")
+
+ // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon"
+ // semantic conventions. It represents the longitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -73.61483
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLonKey = attribute.Key("geo.location.lon")
+
+ // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code"
+ // semantic conventions.
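The gen_ai.token.type enum above is typically paired with a token-usage metric rather than a span attribute. A sketch, assuming the GenAI-conventions histogram name `gen_ai.client.token.usage` (the meter scope and metric name are assumptions, not shown in this diff):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
)

// recordTokenUsage splits token counts by gen_ai.token.type on a histogram.
func recordTokenUsage(ctx context.Context, inputTokens, outputTokens int64) error {
	meter := otel.Meter("example/genai")
	usage, err := meter.Int64Histogram("gen_ai.client.token.usage") // assumed metric name
	if err != nil {
		return err
	}
	usage.Record(ctx, inputTokens, metric.WithAttributes(semconv.GenAITokenTypeInput))
	usage.Record(ctx, outputTokens, metric.WithAttributes(semconv.GenAITokenTypeOutput))
	return nil
}
```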
It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). 
+// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. 
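The geo.* keys, helpers, and continent-code enum above form a coherent location attribute set. A sketch using the documented example values (Montreal):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
)

// montrealGeo builds a consistent geo.* attribute set; the continent code
// comes from the predeclared enum rather than a constructor.
func montrealGeo() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.GeoContinentCodeNa, // geo.continent.code=NA
		semconv.GeoCountryISOCode("CA"),
		semconv.GeoRegionISOCode("CA-QC"),
		semconv.GeoLocalityName("Montreal"),
		semconv.GeoLocationLat(45.505918),
		semconv.GeoLocationLon(-73.61483),
	}
}
```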
+func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
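A GraphQL server instrumentation would combine the graphql.* helpers and operation-type enum above on the execution span. A sketch, reusing the documented (sanitized) document example:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
	"go.opentelemetry.io/otel/trace"
)

// annotateGraphQLSpan records the operation type, name, and sanitized document.
func annotateGraphQLSpan(span trace.Span) {
	span.SetAttributes(
		semconv.GraphQLOperationTypeQuery,
		semconv.GraphQLOperationName("findBookById"),
		semconv.GraphQLDocument("query findBookById { bookById(id: ?) { name } }"),
	)
}
```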
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. + // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. 
+func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Enum values for host.arch +var ( + // AMD64 + // Stability: development + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + // Stability: development + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + // Stability: development + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + // Stability: development + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + // Stability: development + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + // Stability: development + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + // Stability: development + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + // Stability: development + HostArchX86 = HostArchKey.String("x86") +) + +// Namespace: http +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of the + // HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "idle" + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of the + // request payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP request + // method. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GET", "POST", "HEAD" + // Note: HTTP request method value SHOULD be "known" to the instrumentation. + // By default, this convention defines "known" methods as the ones listed in + // [RFC9110] + // and the PATCH method defined in [RFC5789]. + // + // If the HTTP request method is not known to instrumentation, it MUST set the + // `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. 
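With the host.* helpers and host.arch enum values above, a host is usually described as an OTel resource rather than on individual spans. A sketch, assuming this package's SchemaURL constant and the sdk/resource API (values mirror the documented examples):

```go
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
)

// hostResource describes the local host for attachment to a TracerProvider.
func hostResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // schema constant assumed to be exported by this package
		semconv.HostArchAMD64,
		semconv.HostName("opentelemetry-test"),
		semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"),
		semconv.HostType("n1-standard-1"),
	)
}
```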
+ // + // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods + // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GeT", "ACL", "foo" + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including redirects). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, + // or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" + // semantic conventions. It represents the total size of the request in bytes. + // This should be the total number of bytes sent over the wire, including the + // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request + // body if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size of the + // response payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size of + // the response in bytes. This should be the total number of bytes sent over the + // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), + // headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status code]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 200 + // + // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic + // conventions. 
It represents the matched route, that is, the path template in
+ // the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/users/:userID?", "{controller}/{action}/{id?}"
+ // Note: MUST NOT be populated when this is not supported by the HTTP server
+ // framework as the route attribute should have low-cardinality and the URI path
+ // can NOT substitute it.
+ // SHOULD include the [application root] if there is one.
+ //
+ // [application root]: /docs/http/http-spans.md#http-server-definitions
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestHeader returns an attribute KeyValue conforming to the
+// "http.request.header" semantic conventions. It represents the HTTP request
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.request.header."+key, val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of the
+// request in bytes. This should be the total number of bytes sent over the wire,
+// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers,
+// and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of the
+// response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseHeader returns an attribute KeyValue conforming to the
+// "http.response.header" semantic conventions.
It represents the HTTP response
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPResponseHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.response.header."+key, val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of the
+// response in bytes. This should be the total number of bytes sent over the
+// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Enum values for http.connection.state
+var (
+ // active state.
+ // Stability: development
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state.
+ // Stability: development
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+// Enum values for http.request.method
+var (
+ // CONNECT method.
+ // Stability: stable
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method.
+ // Stability: stable
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method.
+ // Stability: stable
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method.
+ // Stability: stable
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method.
+ // Stability: stable
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method.
+ // Stability: stable
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method.
+ // Stability: stable
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method.
+ // Stability: stable
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method.
+ // Stability: stable
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of.
+ // Stability: stable
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// Namespace: hw
+const (
+ // HwBatteryCapacityKey is the attribute Key conforming to the
+ // "hw.battery.capacity" semantic conventions. It represents the design capacity
+ // in Watt-hours or Ampere-hours.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9.3Ah", "50Wh"
+ HwBatteryCapacityKey = attribute.Key("hw.battery.capacity")
+
+ // HwBatteryChemistryKey is the attribute Key conforming to the
+ // "hw.battery.chemistry" semantic conventions. It represents the battery
+ // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
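The http.* helpers and method enums defined above compose on a server span like so; a sketch with illustrative values:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed import path
	"go.opentelemetry.io/otel/trace"
)

// annotateHTTPServerSpan records the method enum, the low-cardinality route
// template, the status code, and one response header.
func annotateHTTPServerSpan(span trace.Span) {
	span.SetAttributes(
		semconv.HTTPRequestMethodGet,
		semconv.HTTPRoute("/users/:userID?"),
		semconv.HTTPResponseStatusCode(200),
		// The header name becomes the key suffix: http.response.header.content-type.
		semconv.HTTPResponseHeader("content-type", "application/json"),
	)
}
```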
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Li-ion", "NiMH" + // + // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html + HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry") + + // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state" + // semantic conventions. It represents the current state of the battery. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwBatteryStateKey = attribute.Key("hw.battery.state") + + // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version" + // semantic conventions. It represents the BIOS version of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + HwBiosVersionKey = attribute.Key("hw.bios_version") + + // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version" + // semantic conventions. It represents the driver version for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.2.1-3" + HwDriverVersionKey = attribute.Key("hw.driver_version") + + // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type" + // semantic conventions. It represents the type of the enclosure (useful for + // modular systems). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Computer", "Storage", "Switch" + HwEnclosureTypeKey = attribute.Key("hw.enclosure.type") + + // HwFirmwareVersionKey is the attribute Key conforming to the + // "hw.firmware_version" semantic conventions. It represents the firmware + // version of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0.1" + HwFirmwareVersionKey = attribute.Key("hw.firmware_version") + + // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic + // conventions. It represents the type of task the GPU is performing. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwGpuTaskKey = attribute.Key("hw.gpu.task") + + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type" + // semantic conventions. It represents the type of limit for hardware + // components. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLimitTypeKey = attribute.Key("hw.limit_type") + + // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the + // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID + // Level of the logical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "RAID0+1", "RAID5", "RAID10" + HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level") + + // HwLogicalDiskStateKey is the attribute Key conforming to the + // "hw.logical_disk.state" semantic conventions. 
It represents the state of the + // logical disk space usage. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state") + + // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type" + // semantic conventions. It represents the type of the memory module. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "DDR4", "DDR5", "LPDDR5" + HwMemoryTypeKey = attribute.Key("hw.memory.type") + + // HwModelKey is the attribute Key conforming to the "hw.model" semantic + // conventions. It represents the descriptive model name of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery" + HwModelKey = attribute.Key("hw.model") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwNetworkLogicalAddressesKey is the attribute Key conforming to the + // "hw.network.logical_addresses" semantic conventions. It represents the + // logical addresses of the adapter (e.g. IP address, or WWPN). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "172.16.8.21", "57.11.193.42" + HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses") + + // HwNetworkPhysicalAddressKey is the attribute Key conforming to the + // "hw.network.physical_address" semantic conventions. It represents the + // physical address of the adapter (e.g. MAC address, or WWNN). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00-90-F5-E9-7B-36" + HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the + // "hw.physical_disk.smart_attribute" semantic conventions. It represents the + // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute + // of the physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate" + // + // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. + HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute") + + // HwPhysicalDiskStateKey is the attribute Key conforming to the + // "hw.physical_disk.state" semantic conventions. It represents the state of the + // physical disk endurance utilization. 
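+ //
+ // Usage sketch (editor's note, not part of the generated file): a hardware
+ // component is typically identified by combining `hw.id`, `hw.name`, and
+ // `hw.parent`, e.g. for a disk attached to a controller (values illustrative):
+ //
+ //	attrs := []attribute.KeyValue{
+ //		HwIDKey.String("disk_0"),
+ //		HwNameKey.String("sda"),
+ //		HwParentKey.String("dellStorage_perc_0"),
+ //	}
+ //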
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state") + + // HwPhysicalDiskTypeKey is the attribute Key conforming to the + // "hw.physical_disk.type" semantic conventions. It represents the type of the + // physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "HDD", "SSD", "10K" + HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type") + + // HwSensorLocationKey is the attribute Key conforming to the + // "hw.sensor_location" semantic conventions. It represents the location of the + // sensor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0 + // V3_3", "MAIN_12V", "CPU_VCORE" + HwSensorLocationKey = attribute.Key("hw.sensor_location") + + // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number" + // semantic conventions. It represents the serial number of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CNFCP0123456789" + HwSerialNumberKey = attribute.Key("hw.serial_number") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTapeDriveOperationTypeKey is the attribute Key conforming to the + // "hw.tape_drive.operation_type" semantic conventions. It represents the type + // of tape drive operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: Describes the category of the hardware component for which `hw.state` + // is being reported. For example, `hw.type=temperature` along with + // `hw.state=degraded` would indicate that the temperature of the hardware + // component has been reported as `degraded`. + HwTypeKey = attribute.Key("hw.type") + + // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic + // conventions. It represents the vendor name of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo" + HwVendorKey = attribute.Key("hw.vendor") +) + +// HwBatteryCapacity returns an attribute KeyValue conforming to the +// "hw.battery.capacity" semantic conventions. It represents the design capacity +// in Watts-hours or Amper-hours. +func HwBatteryCapacity(val string) attribute.KeyValue { + return HwBatteryCapacityKey.String(val) +} + +// HwBatteryChemistry returns an attribute KeyValue conforming to the +// "hw.battery.chemistry" semantic conventions. It represents the battery +// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. 
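+//
+// Usage sketch (editor's note, not part of the generated file): per the
+// `hw.type` note above, a state report pairs `hw.state` with the component
+// category in `hw.type`, e.g. a degraded temperature sensor:
+//
+//	attrs := []attribute.KeyValue{
+//		HwStateDegraded,
+//		HwTypeTemperature,
+//	}
+//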
+// +// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html +func HwBatteryChemistry(val string) attribute.KeyValue { + return HwBatteryChemistryKey.String(val) +} + +// HwBiosVersion returns an attribute KeyValue conforming to the +// "hw.bios_version" semantic conventions. It represents the BIOS version of the +// hardware component. +func HwBiosVersion(val string) attribute.KeyValue { + return HwBiosVersionKey.String(val) +} + +// HwDriverVersion returns an attribute KeyValue conforming to the +// "hw.driver_version" semantic conventions. It represents the driver version for +// the hardware component. +func HwDriverVersion(val string) attribute.KeyValue { + return HwDriverVersionKey.String(val) +} + +// HwEnclosureType returns an attribute KeyValue conforming to the +// "hw.enclosure.type" semantic conventions. It represents the type of the +// enclosure (useful for modular systems). +func HwEnclosureType(val string) attribute.KeyValue { + return HwEnclosureTypeKey.String(val) +} + +// HwFirmwareVersion returns an attribute KeyValue conforming to the +// "hw.firmware_version" semantic conventions. It represents the firmware version +// of the hardware component. +func HwFirmwareVersion(val string) attribute.KeyValue { + return HwFirmwareVersionKey.String(val) +} + +// HwID returns an attribute KeyValue conforming to the "hw.id" semantic +// conventions. It represents an identifier for the hardware component, unique +// within the monitored host. +func HwID(val string) attribute.KeyValue { + return HwIDKey.String(val) +} + +// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the +// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID +// Level of the logical disk. +func HwLogicalDiskRaidLevel(val string) attribute.KeyValue { + return HwLogicalDiskRaidLevelKey.String(val) +} + +// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type" +// semantic conventions. It represents the type of the memory module. +func HwMemoryType(val string) attribute.KeyValue { + return HwMemoryTypeKey.String(val) +} + +// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic +// conventions. It represents the descriptive model name of the hardware +// component. +func HwModel(val string) attribute.KeyValue { + return HwModelKey.String(val) +} + +// HwName returns an attribute KeyValue conforming to the "hw.name" semantic +// conventions. It represents an easily-recognizable name for the hardware +// component. +func HwName(val string) attribute.KeyValue { + return HwNameKey.String(val) +} + +// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the +// "hw.network.logical_addresses" semantic conventions. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return HwNetworkLogicalAddressesKey.StringSlice(val) +} + +// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the +// "hw.network.physical_address" semantic conventions. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func HwNetworkPhysicalAddress(val string) attribute.KeyValue { + return HwNetworkPhysicalAddressKey.String(val) +} + +// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic +// conventions. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). 
+func HwParent(val string) attribute.KeyValue { + return HwParentKey.String(val) +} + +// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the +// "hw.physical_disk.smart_attribute" semantic conventions. It represents the +// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute +// of the physical disk. +// +// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. +func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue { + return HwPhysicalDiskSmartAttributeKey.String(val) +} + +// HwPhysicalDiskType returns an attribute KeyValue conforming to the +// "hw.physical_disk.type" semantic conventions. It represents the type of the +// physical disk. +func HwPhysicalDiskType(val string) attribute.KeyValue { + return HwPhysicalDiskTypeKey.String(val) +} + +// HwSensorLocation returns an attribute KeyValue conforming to the +// "hw.sensor_location" semantic conventions. It represents the location of the +// sensor. +func HwSensorLocation(val string) attribute.KeyValue { + return HwSensorLocationKey.String(val) +} + +// HwSerialNumber returns an attribute KeyValue conforming to the +// "hw.serial_number" semantic conventions. It represents the serial number of +// the hardware component. +func HwSerialNumber(val string) attribute.KeyValue { + return HwSerialNumberKey.String(val) +} + +// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic +// conventions. It represents the vendor name of the hardware component. +func HwVendor(val string) attribute.KeyValue { + return HwVendorKey.String(val) +} + +// Enum values for hw.battery.state +var ( + // Charging + // Stability: development + HwBatteryStateCharging = HwBatteryStateKey.String("charging") + // Discharging + // Stability: development + HwBatteryStateDischarging = HwBatteryStateKey.String("discharging") +) + +// Enum values for hw.gpu.task +var ( + // Decoder + // Stability: development + HwGpuTaskDecoder = HwGpuTaskKey.String("decoder") + // Encoder + // Stability: development + HwGpuTaskEncoder = HwGpuTaskKey.String("encoder") + // General + // Stability: development + HwGpuTaskGeneral = HwGpuTaskKey.String("general") +) + +// Enum values for hw.limit_type +var ( + // Critical + // Stability: development + HwLimitTypeCritical = HwLimitTypeKey.String("critical") + // Degraded + // Stability: development + HwLimitTypeDegraded = HwLimitTypeKey.String("degraded") + // High Critical + // Stability: development + HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical") + // High Degraded + // Stability: development + HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded") + // Low Critical + // Stability: development + HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical") + // Low Degraded + // Stability: development + HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded") + // Maximum + // Stability: development + HwLimitTypeMax = HwLimitTypeKey.String("max") + // Throttled + // Stability: development + HwLimitTypeThrottled = HwLimitTypeKey.String("throttled") + // Turbo + // Stability: development + HwLimitTypeTurbo = HwLimitTypeKey.String("turbo") +) + +// Enum values for hw.logical_disk.state +var ( + // Used + // Stability: development + HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used") + // Free + // Stability: development + HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free") +) + +// Enum values for hw.physical_disk.state +var ( + // Remaining + // Stability: development + HwPhysicalDiskStateRemaining = 
HwPhysicalDiskStateKey.String("remaining") +) + +// Enum values for hw.state +var ( + // Degraded + // Stability: development + HwStateDegraded = HwStateKey.String("degraded") + // Failed + // Stability: development + HwStateFailed = HwStateKey.String("failed") + // Needs Cleaning + // Stability: development + HwStateNeedsCleaning = HwStateKey.String("needs_cleaning") + // OK + // Stability: development + HwStateOk = HwStateKey.String("ok") + // Predicted Failure + // Stability: development + HwStatePredictedFailure = HwStateKey.String("predicted_failure") +) + +// Enum values for hw.tape_drive.operation_type +var ( + // Mount + // Stability: development + HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount") + // Unmount + // Stability: development + HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount") + // Clean + // Stability: development + HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean") +) + +// Enum values for hw.type +var ( + // Battery + // Stability: development + HwTypeBattery = HwTypeKey.String("battery") + // CPU + // Stability: development + HwTypeCPU = HwTypeKey.String("cpu") + // Disk controller + // Stability: development + HwTypeDiskController = HwTypeKey.String("disk_controller") + // Enclosure + // Stability: development + HwTypeEnclosure = HwTypeKey.String("enclosure") + // Fan + // Stability: development + HwTypeFan = HwTypeKey.String("fan") + // GPU + // Stability: development + HwTypeGpu = HwTypeKey.String("gpu") + // Logical disk + // Stability: development + HwTypeLogicalDisk = HwTypeKey.String("logical_disk") + // Memory + // Stability: development + HwTypeMemory = HwTypeKey.String("memory") + // Network + // Stability: development + HwTypeNetwork = HwTypeKey.String("network") + // Physical disk + // Stability: development + HwTypePhysicalDisk = HwTypeKey.String("physical_disk") + // Power supply + // Stability: development + HwTypePowerSupply = HwTypeKey.String("power_supply") + // Tape drive + // Stability: development + HwTypeTapeDrive = HwTypeKey.String("tape_drive") + // Temperature + // Stability: development + HwTypeTemperature = HwTypeKey.String("temperature") + // Voltage + // Stability: development + HwTypeVoltage = HwTypeKey.String("voltage") +) + +// Namespace: ios +const ( + // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" + // semantic conventions. It represents the state of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The iOS lifecycle states are defined in the + // [UIApplicationDelegate documentation], from which the `OS terminology` + // column values are derived. + // + // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate + IOSAppStateKey = attribute.Key("ios.app.state") +) + +// Enum values for ios.app.state +var ( + // The app has become `active`. Associated with UIKit notification + // `applicationDidBecomeActive`. + // + // Stability: development + IOSAppStateActive = IOSAppStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification + // `applicationWillResignActive`. + // + // Stability: development + IOSAppStateInactive = IOSAppStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit + // notification `applicationDidEnterBackground`.
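+ //
+ // Usage sketch (editor's note, not part of the generated file): an
+ // instrumentation would record exactly one ios.app.state value per lifecycle
+ // transition, e.g. on `applicationDidBecomeActive` (assuming `span` is an
+ // active trace.Span):
+ //
+ //	span.AddEvent("app.lifecycle", trace.WithAttributes(IOSAppStateActive))
+ //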
+ // + // Stability: development + IOSAppStateBackground = IOSAppStateKey.String("background") + // The app is now in the foreground. This value is associated with UIKit + // notification `applicationWillEnterForeground`. + // + // Stability: development + IOSAppStateForeground = IOSAppStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification + // `applicationWillTerminate`. + // + // Stability: development + IOSAppStateTerminate = IOSAppStateKey.String("terminate") +) + +// Namespace: k8s +const ( + // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" + // semantic conventions. It represents the name of the cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-cluster" + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" + // semantic conventions. It represents a pseudo-ID for the cluster, set to the + // UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8s cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8s ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T X.667]. + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // > different from all other UUIDs generated before 3603 A.D., or is + // > extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + // + // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "redis" + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the number + // of times the container was restarted. This attribute can be used to identify + // a particular container (running or stopped) within a container spec. 
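+ //
+ // Usage sketch (editor's note, not part of the generated file): following the
+ // k8s.cluster.uid note above, the `kube-system` namespace UID can stand in as
+ // the cluster pseudo-ID. Assuming a configured client-go *kubernetes.Clientset:
+ //
+ //	ns, err := clientset.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
+ //	if err == nil {
+ //		attr := K8SClusterUIDKey.String(string(ns.UID))
+ //		_ = attr // attach to the telemetry resource
+ //	}
+ //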
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to + // the "k8s.container.status.last_terminated_reason" semantic conventions. It + // represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "Error" + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SContainerStatusReasonKey is the attribute Key conforming to the + // "k8s.container.status.reason" semantic conventions. It represents the reason + // for the container state. Corresponds to the `reason` field of the: + // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ContainerCreating", "CrashLoopBackOff", + // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", + // "OOMKilled", "Completed", "Error", "ContainerCannotRun" + // + // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core + // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core + K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") + + // K8SContainerStatusStateKey is the attribute Key conforming to the + // "k8s.container.status.state" semantic conventions. It represents the state of + // the container. [K8s ContainerState]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "terminated", "running", "waiting" + // + // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core + K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") + + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" + // semantic conventions. It represents the name of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" + // semantic conventions. It represents the UID of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" + // semantic conventions. It represents the UID of the DaemonSet. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SHPAMetricTypeKey is the attribute Key conforming to the + // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric + // source for the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Resource", "ContainerResource" + // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. + K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") + + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic + // conventions. It represents the name of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the + // API version of the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "apps/v1", "autoscaling/v2" + // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA + // spec. + K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") + + // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Deployment", "StatefulSet" + // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") + + // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-deployment", "my-statefulset" + // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" + // semantic conventions. It represents the size (identifier) of the K8s huge + // page. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2Mi" + K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeConditionStatusKey is the attribute Key conforming to the + // "k8s.node.condition.status" semantic conventions. It represents the status of + // the condition, one of True, False, Unknown. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "true", "false", "unknown" + // Note: This attribute aligns with the `status` field of the + // [NodeCondition] + // + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") + + // K8SNodeConditionTypeKey is the attribute Key conforming to the + // "k8s.node.condition.type" semantic conventions. It represents the condition + // type of a K8s Node. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Ready", "DiskPressure" + // Note: K8s Node conditions as described + // by [K8s documentation]. + // + // This attribute aligns with the `type` field of the + // [NodeCondition] + // + // The set of possible values is not limited to those listed here. Managed + // Kubernetes environments, + // or custom controllers MAY introduce additional node condition types. + // When this occurs, the exact value as reported by the Kubernetes API SHOULD be + // used. 
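+ //
+ // Usage sketch (editor's note, not part of the generated file): a node
+ // condition is typically reported as a type/status pair using the enum values
+ // defined later in this file, e.g. a node that is Ready:
+ //
+ //	attrs := []attribute.KeyValue{
+ //		K8SNodeConditionTypeReady,
+ //		K8SNodeConditionStatusConditionTrue,
+ //	}
+ //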
+ // + // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. 
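+ //
+ // Usage sketch (editor's note, not part of the generated file): per the
+ // k8s.resourcequota.resource_name note below, both the full and the short
+ // resource-name forms are valid, e.g.:
+ //
+ //	a := K8SResourceQuotaResourceNameKey.String("count/deployments.apps")
+ //	b := K8SResourceQuotaResourceNameKey.String("pods")
+ //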
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the + // "k8s.resourcequota.resource_name" semantic conventions. It represents the + // name of the K8s resource a resource quota defines. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "count/replicationcontrollers" + // Note: The value for this attribute can be either the full + // `count/<resource>[.<group>]` string (e.g., count/deployments.apps, + // count/pods), or, for certain core Kubernetes resources, just the resource + // name (e.g., pods, services, configmaps). Both forms are supported by + // Kubernetes for object count quotas. See + // [Kubernetes Resource Quotas documentation] for more details. + // + // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota + K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name") + + // K8SResourceQuotaUIDKey is the attribute Key conforming to the + // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStorageclassNameKey is the attribute Key conforming to the + // "k8s.storageclass.name" semantic conventions. It represents the name of K8s + // [StorageClass] object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gold.storageclass.storage.k8s.io" + // + // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io + K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name") + + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" + // semantic conventions. It represents the name of the K8s volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "volume0" + K8SVolumeNameKey = attribute.Key("k8s.volume.name") + + // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" + // semantic conventions. It represents the type of the K8s volume. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "emptyDir", "persistentVolumeClaim" + K8SVolumeTypeKey = attribute.Key("k8s.volume.type") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions.
It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify a +// particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob +// annotation placed on the CronJob, the `<key>` being the annotation name, the +// value being the annotation value. +func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.annotation."+key, val) +} + +// K8SCronJobLabel returns an attribute KeyValue conforming to the +// "k8s.cronjob.label" semantic conventions. It represents the label placed on +// the CronJob, the `<key>` being the label name, the value being the label +// value. +func K8SCronJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.label."+key, val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.daemonset.annotation" semantic conventions. It represents the annotation +// placed on the DaemonSet, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.annotation."+key, val) +} + +// K8SDaemonSetLabel returns an attribute KeyValue conforming to the +// "k8s.daemonset.label" semantic conventions.
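+//
+// Usage sketch (editor's note, not part of the generated file): the
+// annotation/label helpers in this file build templated attribute names by
+// appending the caller-supplied `<key>`, e.g.:
+//
+//	// yields attribute key "k8s.daemonset.label.app.kubernetes.io/name"
+//	kv := K8SDaemonSetLabel("app.kubernetes.io/name", "my-app")
+//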
It represents the label placed on +// the DaemonSet, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.label."+key, val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the +// "k8s.deployment.annotation" semantic conventions. It represents the annotation +// placed on the Deployment, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.annotation."+key, val) +} + +// K8SDeploymentLabel returns an attribute KeyValue conforming to the +// "k8s.deployment.label" semantic conventions. It represents the label placed on +// the Deployment, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SDeploymentLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.label."+key, val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SHPAMetricType returns an attribute KeyValue conforming to the +// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric +// source for the horizontal pod autoscaler. +func K8SHPAMetricType(val string) attribute.KeyValue { + return K8SHPAMetricTypeKey.String(val) +} + +// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" +// semantic conventions. It represents the name of the horizontal pod autoscaler. +func K8SHPAName(val string) attribute.KeyValue { + return K8SHPANameKey.String(val) +} + +// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the +// API version of the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue { + return K8SHPAScaletargetrefAPIVersionKey.String(val) +} + +// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of +// the target resource to scale for the HorizontalPodAutoscaler.
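+//
+// Usage sketch (editor's note, not part of the generated file): the three
+// scaletargetref attributes are typically emitted together to describe the
+// HPA's target, e.g.:
+//
+//	attrs := []attribute.KeyValue{
+//		K8SHPAScaletargetrefAPIVersion("apps/v1"),
+//		K8SHPAScaletargetrefKind("Deployment"),
+//		K8SHPAScaletargetrefName("my-deployment"),
+//	}
+//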
+func K8SHPAScaletargetrefKind(val string) attribute.KeyValue { + return K8SHPAScaletargetrefKindKey.String(val) +} + +// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefName(val string) attribute.KeyValue { + return K8SHPAScaletargetrefNameKey.String(val) +} + +// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" +// semantic conventions. It represents the UID of the horizontal pod autoscaler. +func K8SHPAUID(val string) attribute.KeyValue { + return K8SHPAUIDKey.String(val) +} + +// K8SHugepageSize returns an attribute KeyValue conforming to the +// "k8s.hugepage.size" semantic conventions. It represents the size (identifier) +// of the K8s huge page. +func K8SHugepageSize(val string) attribute.KeyValue { + return K8SHugepageSizeKey.String(val) +} + +// K8SJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.job.annotation" semantic conventions. It represents the annotation placed +// on the Job, the `<key>` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.annotation."+key, val) +} + +// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" +// semantic conventions. It represents the label placed on the Job, the `<key>` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.label."+key, val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the +// "k8s.namespace.annotation" semantic conventions. It represents the annotation +// placed on the Namespace, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.annotation."+key, val) +} + +// K8SNamespaceLabel returns an attribute KeyValue conforming to the +// "k8s.namespace.label" semantic conventions. It represents the label placed on +// the Namespace, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SNamespaceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.label."+key, val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeAnnotation returns an attribute KeyValue conforming to the +// "k8s.node.annotation" semantic conventions.
It represents the annotation +// placed on the Node, the `<key>` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SNodeAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.annotation."+key, val) +} + +// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" +// semantic conventions. It represents the label placed on the Node, the `<key>` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SNodeLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.label."+key, val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" +// semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodAnnotation returns an attribute KeyValue conforming to the +// "k8s.pod.annotation" semantic conventions. It represents the annotation placed +// on the Pod, the `<key>` being the annotation name, the value being the +// annotation value. +func K8SPodAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.annotation."+key, val) +} + +// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label" +// semantic conventions. It represents the label placed on the Pod, the `<key>` +// being the label name, the value being the label value. +func K8SPodLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.label."+key, val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.replicaset.annotation" semantic conventions. It represents the annotation +// placed on the ReplicaSet, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.annotation."+key, val) +} + +// K8SReplicaSetLabel returns an attribute KeyValue conforming to the +// "k8s.replicaset.label" semantic conventions. It represents the label placed on +// the ReplicaSet, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SReplicaSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.label."+key, val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions.
It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicationControllerName returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.name" semantic conventions. It represents the name +// of the replication controller. +func K8SReplicationControllerName(val string) attribute.KeyValue { + return K8SReplicationControllerNameKey.String(val) +} + +// K8SReplicationControllerUID returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of +// the replication controller. +func K8SReplicationControllerUID(val string) attribute.KeyValue { + return K8SReplicationControllerUIDKey.String(val) +} + +// K8SResourceQuotaName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.name" semantic conventions. It represents the name of the +// resource quota. +func K8SResourceQuotaName(val string) attribute.KeyValue { + return K8SResourceQuotaNameKey.String(val) +} + +// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.resource_name" semantic conventions. It represents the name +// of the K8s resource a resource quota defines. +func K8SResourceQuotaResourceName(val string) attribute.KeyValue { + return K8SResourceQuotaResourceNameKey.String(val) +} + +// K8SResourceQuotaUID returns an attribute KeyValue conforming to the +// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the +// resource quota. +func K8SResourceQuotaUID(val string) attribute.KeyValue { + return K8SResourceQuotaUIDKey.String(val) +} + +// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.statefulset.annotation" semantic conventions. It represents the +// annotation placed on the StatefulSet, the `<key>` being the annotation name, +// the value being the annotation value, even if the value is empty. +func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.annotation."+key, val) +} + +// K8SStatefulSetLabel returns an attribute KeyValue conforming to the +// "k8s.statefulset.label" semantic conventions. It represents the label placed +// on the StatefulSet, the `<key>` being the label name, the value being the +// label value, even if the value is empty. +func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.label."+key, val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStorageclassName returns an attribute KeyValue conforming to the +// "k8s.storageclass.name" semantic conventions. It represents the name of K8s +// [StorageClass] object.
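+//
+// Usage sketch (editor's note, not part of the generated file): a pod is
+// commonly located by combining namespace, workload, and pod attributes, e.g.:
+//
+//	attrs := []attribute.KeyValue{
+//		K8SNamespaceName("default"),
+//		K8SStatefulSetName("opentelemetry"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//	}
+//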
+//
+// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+func K8SStorageclassName(val string) attribute.KeyValue {
+	return K8SStorageclassNameKey.String(val)
+}
+
+// K8SVolumeName returns an attribute KeyValue conforming to the
+// "k8s.volume.name" semantic conventions. It represents the name of the K8s
+// volume.
+func K8SVolumeName(val string) attribute.KeyValue {
+	return K8SVolumeNameKey.String(val)
+}
+
+// Enum values for k8s.container.status.reason
+var (
+	// The container is being created.
+	// Stability: development
+	K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating")
+	// The container is in a crash loop back-off state.
+	// Stability: development
+	K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff")
+	// There was an error creating the container configuration.
+	// Stability: development
+	K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError")
+	// There was an error pulling the container image.
+	// Stability: development
+	K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull")
+	// The container image pull is in a back-off state.
+	// Stability: development
+	K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff")
+	// The container was killed due to an out-of-memory condition.
+	// Stability: development
+	K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled")
+	// The container has completed execution.
+	// Stability: development
+	K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed")
+	// There was an error with the container.
+	// Stability: development
+	K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error")
+	// The container cannot run.
+	// Stability: development
+	K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun")
+)
+
+// Enum values for k8s.container.status.state
+var (
+	// The container has terminated.
+	// Stability: development
+	K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated")
+	// The container is running.
+	// Stability: development
+	K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running")
+	// The container is waiting.
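A minimal usage sketch, not part of this diff, illustrating the helpers above: the templated K8s functions compose the attribute key from the caller-supplied label or annotation name, while the enum constants in the surrounding blocks are plain attribute.KeyValue values. The semconv import path and version below are assumptions, not something this diff pins down.

    package main

    import (
        "fmt"

        semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // module path/version assumed
    )

    func main() {
        // Templated attribute: the key becomes "k8s.pod.label.app".
        label := semconv.K8SPodLabel("app", "checkout")
        fmt.Println(label.Key, "=", label.Value.AsString())

        // Fixed-key helper and an enum constant from the
        // k8s.container.status.state block.
        fmt.Println(semconv.K8SPodName("checkout-7d4f8").Key)                // k8s.pod.name
        fmt.Println(semconv.K8SContainerStatusStateWaiting.Value.AsString()) // waiting
    }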
+ // Stability: development + K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") +) + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.node.condition.status +var ( + // condition_true + // Stability: development + K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") + // condition_false + // Stability: development + K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") + // condition_unknown + // Stability: development + K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") +) + +// Enum values for k8s.node.condition.type +var ( + // The node is healthy and ready to accept pods + // Stability: development + K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") + // Pressure exists on the disk size—that is, if the disk capacity is low + // Stability: development + K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") + // Pressure exists on the node memory—that is, if the node memory is low + // Stability: development + K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") + // Pressure exists on the processes—that is, if there are too many processes + // on the node + // Stability: development + K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure") + // The network for the node is not correctly configured + // Stability: development + K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // 
"linux.memory.slab.state" semantic conventions. It represents the Linux Slab + // memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV"
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a
+	// [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+	// identifiers (e.g. UUID) may be used as needed.
+	//
+	// [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+	return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+	return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+	return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+	return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+	return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+	// Logs from stdout stream
+	// Stability: development
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Events from stderr stream
+	// Stability: development
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Namespace: mainframe
+const (
+	// MainframeLparNameKey is the attribute Key conforming to the
+	// "mainframe.lpar.name" semantic conventions. It represents the name of the
+	// logical partition that hosts systems with a mainframe operating system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "LPAR01"
+	MainframeLparNameKey = attribute.Key("mainframe.lpar.name")
+)
+
+// MainframeLparName returns an attribute KeyValue conforming to the
+// "mainframe.lpar.name" semantic conventions. It represents the name of the
+// logical partition that hosts systems with a mainframe operating system.
+func MainframeLparName(val string) attribute.KeyValue {
+	return MainframeLparNameKey.String(val)
+}
+
+// Namespace: messaging
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions.
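Before the messaging namespace continues, a short hypothetical sketch (not part of this diff) of the log.* helpers defined above combined into a deduplicated attribute set; the semconv import path and version are assumed:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // module path/version assumed
    )

    func main() {
        // attribute.NewSet sorts and deduplicates; the enum value carries the
        // "log.iostream" key itself.
        set := attribute.NewSet(
            semconv.LogFileName("audit.log"),
            semconv.LogFilePath("/var/log/mysql/audit.log"),
            semconv.LogIostreamStdout,
        )
        for _, kv := range set.ToSlice() {
            fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
        }
    }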
It represents the + // number of messages sent, received, or processed in the scope of the batching + // operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client library + // supports both batch and single-message API for the same operation, + // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs + // and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique identifier + // for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") + + // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It + // represents the ack deadline in seconds set for the modify ack deadline + // request. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the + // ack id for a given message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ack_id + MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. + // It represents the delivery attempt for a given message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. 
It
+	// represents the ordering key for a given message. If the attribute is not
+	// present, the message does not have an ordering key.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ordering_key
+	MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the message
+	// keys in Kafka, which are used for grouping alike messages to ensure they're
+	// processed on the same partition. They differ from `messaging.message.id` in
+	// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myKey
+	// Note: If the key type is not string, its string representation has to be
+	// supplied for the attribute. If the key has no unambiguous, canonical string
+	// form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+	// boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+	// MessagingKafkaOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+	// record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the size of
+	// the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: This can refer to either the compressed or the uncompressed body size.
+	// If both sizes are known, the uncompressed body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents the
+	// conversation ID identifying the conversation to which the message belongs,
+	// represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: MyConversationId
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+	// "messaging.message.envelope.size" semantic conventions. It represents the
+	// size of the message body and metadata in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: This can refer to either the compressed or the uncompressed size. If
+	// both sizes are known, the uncompressed size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used by + // the messaging system as an identifier for the message, represented as a + // string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 452a7c7c7c7048c2f887f61572b18fc2 + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ack", "nack", "send" + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to + // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It + // represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the + // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents + // the rabbitMQ message delivery tag. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") + + // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the + // "messaging.rocketmq.consumption_model" semantic conventions. It represents + // the model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to + // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It + // represents the delay time level for delay message, which determines the + // message delay time. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming + // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. + // It represents the timestamp in milliseconds that the delay message is + // expected to be delivered to consumer. 
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents the
+	// message group, which is essential for FIFO messages. Messages that belong to
+	// the same message group are always processed one by one within the same
+	// consumer group.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myMessageGroup
+	MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+	// key(s) of the message, another way to mark a message besides the message ID.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "keyA", "keyB"
+	MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of the message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: tagA
+	MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents the
+	// type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// isolated from each other.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myNamespace
+	MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+	// the "messaging.servicebus.disposition_status" semantic conventions. It
+	// describes the [settlement type].
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	//
+	// [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+	MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+	// the "messaging.servicebus.message.delivery_count" semantic conventions. It
+	// represents the number of deliveries that have been attempted for this
+	// message.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+	// the "messaging.servicebus.message.enqueued_time" semantic conventions.
It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") + + // MessagingSystemKey is the attribute Key conforming to the "messaging.system" + // semantic conventions. It represents the messaging system as identified by the + // client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate with + // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the + // instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. 
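A hypothetical sketch, not part of this diff, of how the messaging helpers and enum values defined in this namespace might be attached to a producer span; the semconv import path/version and the tracer wiring are assumptions (the global tracer is a no-op unless a TracerProvider is installed):

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // module path/version assumed
        "go.opentelemetry.io/otel/trace"
    )

    func publish(ctx context.Context) {
        _, span := otel.Tracer("example").Start(ctx, "send MyQueue",
            trace.WithSpanKind(trace.SpanKindProducer))
        defer span.End()

        span.SetAttributes(
            semconv.MessagingSystemRabbitMQ,             // messaging.system=rabbitmq
            semconv.MessagingDestinationName("MyQueue"), // messaging.destination.name
            semconv.MessagingOperationTypeSend,          // messaging.operation.type=send
            semconv.MessagingMessageBodySize(512),       // messaging.message.body.size
        )
    }

    func main() { publish(context.Background()) }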
+func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It +// represents the UTC epoch seconds at which the message has been accepted and +// stored in the entity. +func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventHubsMessageEnqueuedTimeKey.Int(val) +} + +// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It +// represents the ack deadline in seconds set for the modify ack deadline +// request. +func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the +// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the +// ack id for a given message. +func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageAckIDKey.String(val) +} + +// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It +// represents the ordering key for a given message. If the attribute is not +// present, the message does not have an ordering key. +func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the message +// keys in Kafka are used for grouping alike messages to ensure they're processed +// on the same partition. They differ from `messaging.message.id` in that they're +// not unique. If the key is `null`, the attribute MUST NOT be set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the +// "messaging.kafka.message.tombstone" semantic conventions. 
It represents a +// boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingKafkaOffset returns an attribute KeyValue conforming to the +// "messaging.kafka.offset" semantic conventions. It represents the offset of a +// record in the corresponding Kafka partition. +func MessagingKafkaOffset(val int) attribute.KeyValue { + return MessagingKafkaOffsetKey.Int(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size of +// the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming to the +// "messaging.message.conversation_id" semantic conventions. It represents the +// conversation ID identifying the conversation to which the message belongs, +// represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the +// "messaging.message.envelope.size" semantic conventions. It represents the size +// of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by the +// messaging system as an identifier for the message, represented as a string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitMQDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming +// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It +// represents the rabbitMQ message delivery tag. +func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitMQMessageDeliveryTagKey.Int(val) +} + +// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketMQMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. 
It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the
+// message group, which is essential for FIFO messages. Messages that belong to
+// the same message group are always processed one by one within the same
+// consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark a message besides the message ID.
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// isolated from each other.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+	return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+	return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
+
+// Enum values for messaging.operation.type
+var (
+	// A message is created. "Create" spans always refer to a single message and are
+	// used to provide a unique creation context for messages in batch sending
+	// scenarios.
+	//
+	// Stability: development
+	MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+	// One or more messages are provided for sending to an intermediary. If a single
+	// message is sent, the context of the "Send" span can be used as the creation
+	// context and no "Create" span needs to be created.
+	//
+	// Stability: development
+	MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+	// One or more messages are requested by a consumer. This operation refers to
+	// pull-based scenarios, where consumers explicitly call methods of messaging
+	// SDKs to receive messages.
+ // + // Stability: development + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are processed by a consumer. + // + // Stability: development + MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") + // One or more messages are settled. + // + // Stability: development + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Notification Service (SNS) + // Stability: development + MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + 
// Stability: development
+	MessagingSystemPulsar = MessagingSystemKey.String("pulsar")
+)
+
+// Namespace: network
+const (
+	// NetworkCarrierICCKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier network.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: DE
+	NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMCCKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+	// country code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 310
+	NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMNCKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+	// network code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 001
+	NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of the
+	// mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: sprint
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionStateKey is the attribute Key conforming to the
+	// "network.connection.state" semantic conventions. It represents the state of
+	// the network connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "close_wait"
+	// Note: Connection states are defined as part of [rfc9293]
+	//
+	// [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+	NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more details
+	// regarding the connection.type. It may be the type of cell technology
+	// connection, but it could be used for describing details about a wifi
+	// connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: LTE
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the internet
+	// connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: wifi
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkInterfaceNameKey is the attribute Key conforming to the
+	// "network.interface.name" semantic conventions. It represents the network
+	// interface name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "lo", "eth0"
+	NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+	// NetworkIODirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network IO
+	// operation direction.
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "transmit" + NetworkIODirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. + // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. 
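The note above recommends pairing any port attribute with the transport, since a bare port number is ambiguous; a small sketch of that pairing (not part of this diff; the semconv import path and version are assumed):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // module path/version assumed
    )

    func main() {
        // A port number alone is ambiguous; network.transport disambiguates it.
        peer := []attribute.KeyValue{
            semconv.NetworkPeerAddress("10.1.2.80"),
            semconv.NetworkPeerPort(12345),
            semconv.NetworkTransportTCP,
        }
        for _, kv := range peer {
            fmt.Println(kv.Key, "=", kv.Value.Emit())
        }
    }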
+ // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. It represents the [OSI network layer] or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. 
+// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + // Stability: development + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + // Stability: development + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + // Stability: development + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + // Stability: development + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + // Stability: development + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + // Stability: development + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + // Stability: development + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + // Stability: development + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + // Stability: development + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + // Stability: development + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// Enum values for network.connection.type +var ( + // wifi + // Stability: development + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + // Stability: development + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + // Stability: development + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + // Stability: development + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + // Stability: development + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +// Enum values for network.io.direction +var ( + // transmit + // Stability: development + NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") + // receive + // Stability: development + NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") +) + +// Enum values for network.transport +var ( + // TCP + // Stability: stable + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + // Stability: stable + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe. + // Stability: stable + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + // Stability: stable + NetworkTransportUnix = NetworkTransportKey.String("unix") + // QUIC + // Stability: stable + NetworkTransportQUIC = NetworkTransportKey.String("quic") +) + +// Enum values for network.type +var ( + // IPv4 + // Stability: stable + NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") + // IPv6 + // Stability: stable + NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") +) + +// Namespace: oci +const ( + // OCIManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of the + // OCI image manifest. For container images specifically is the digest by which + // the container image is known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" + // Note: Follows [OCI Image Manifest Specification], and specifically the + // [Digest property]. + // An example can be found in [Example Image Manifest]. 
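+ //
+ // Example (editor's sketch, not part of the generated file): recording a
+ // manifest digest already resolved by the container runtime; the value is
+ // the illustrative digest from the examples above.
+ //
+ //	span.SetAttributes(OCIManifestDigest(
+ //		"sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4",
+ //	))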
+ // + // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md + // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests + // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest + OCIManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OCIManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OCIManifestDigest(val string) attribute.KeyValue { + return OCIManifestDigestKey.String(val) +} + +// Namespace: openai +const ( + // OpenAIRequestServiceTierKey is the attribute Key conforming to the + // "openai.request.service_tier" semantic conventions. It represents the service + // tier requested. May be a specific tier, default, or auto. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "auto", "default" + OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier") + + // OpenAIResponseServiceTierKey is the attribute Key conforming to the + // "openai.response.service_tier" semantic conventions. It represents the + // service tier used for the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "scale", "default" + OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier") + + // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the + // "openai.response.system_fingerprint" semantic conventions. It represents a + // fingerprint to track any eventual change in the Generative AI environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fp_44709d6fcb" + OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint") +) + +// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "openai.response.service_tier" semantic conventions. It represents the service +// tier used for the response. +func OpenAIResponseServiceTier(val string) attribute.KeyValue { + return OpenAIResponseServiceTierKey.String(val) +} + +// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to +// the "openai.response.system_fingerprint" semantic conventions. It represents a +// fingerprint to track any eventual change in the Generative AI environment. +func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return OpenAIResponseSystemFingerprintKey.String(val) +} + +// Enum values for openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. + // Stability: development + OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default") +) + +// Namespace: opentracing +const ( + // OpenTracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the parent-child + // Reference type. 
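+ //
+ // Example (editor's sketch, not part of the generated file): tagging a span
+ // that was created from an OpenTracing `childOf` reference, using the enum
+ // value defined below.
+ //
+ //	span.SetAttributes(OpenTracingRefTypeChildOf)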
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The causal relationship between a child Span and a parent Span. + OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +// Enum values for opentracing.ref_type +var ( + // The parent Span depends on the child Span in some capacity + // Stability: development + OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + // Stability: development + OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") +) + +// Namespace: os +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TQ3C.230805.001.B2", "20E247", "22621" + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to be + // parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" semantic + // conventions. It represents the version string of the operating system as + // defined in [Version Attributes]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + OSVersionKey = attribute.Key("os.version") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the "os.description" +// semantic conventions. It represents the human readable (not intended to be +// parsed) OS version information, like e.g. reported by `ver` or +// `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. 
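+//
+// Example (editor's sketch, not part of the generated file): describing the
+// host OS on a resource. Assumes go.opentelemetry.io/otel/sdk/resource is
+// imported; the attribute values are illustrative.
+//
+//	res := resource.NewSchemaless(
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//	)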
+func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating system +// as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Enum values for os.type +var ( + // Microsoft Windows + // Stability: development + OSTypeWindows = OSTypeKey.String("windows") + // Linux + // Stability: development + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + // Stability: development + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + // Stability: development + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + // Stability: development + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + // Stability: development + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + // Stability: development + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + // Stability: development + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + // Stability: development + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + // Stability: development + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + // Stability: development + OSTypeZOS = OSTypeKey.String("zos") +) + +// Namespace: otel +const ( + // OTelComponentNameKey is the attribute Key conforming to the + // "otel.component.name" semantic conventions. It represents a name uniquely + // identifying the instance of the OpenTelemetry component within its containing + // SDK instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otlp_grpc_span_exporter/0", "custom-name" + // Note: Implementations SHOULD ensure a low cardinality for this attribute, + // even across application or SDK restarts. + // E.g. implementations MUST NOT use UUIDs as values for this attribute. + // + // Implementations MAY achieve these goals by following a + // `/` pattern, e.g. + // `batching_span_processor/0`. + // Hereby `otel.component.type` refers to the corresponding attribute value of + // the component. + // + // The value of `instance-counter` MAY be automatically assigned by the + // component and uniqueness within the enclosing SDK instance MUST be + // guaranteed. + // For example, `` MAY be implemented by using a monotonically + // increasing counter (starting with `0`), which is incremented every time an + // instance of the given component type is started. + // + // With this implementation, for example the first Batching Span Processor would + // have `batching_span_processor/0` + // as `otel.component.name`, the second one `batching_span_processor/1` and so + // on. + // These values will therefore be reused in the case of an application restart. + OTelComponentNameKey = attribute.Key("otel.component.name") + + // OTelComponentTypeKey is the attribute Key conforming to the + // "otel.component.type" semantic conventions. It represents a name identifying + // the type of the OpenTelemetry component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "batching_span_processor", "com.example.MySpanExporter" + // Note: If none of the standardized values apply, implementations SHOULD use + // the language-defined name of the type. 
+ // E.g. for Java the fully qualified classname SHOULD be used in this case. + OTelComponentTypeKey = attribute.Key("otel.component.type") + + // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" + // semantic conventions. It represents the name of the instrumentation scope - ( + // `InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "io.opentelemetry.contrib.mongodb" + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeSchemaURLKey is the attribute Key conforming to the + // "otel.scope.schema_url" semantic conventions. It represents the schema URL of + // the instrumentation scope. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://opentelemetry.io/schemas/1.31.0" + OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of the + // instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.0.0" + OTelScopeVersionKey = attribute.Key("otel.scope.version") + + // OTelSpanParentOriginKey is the attribute Key conforming to the + // "otel.span.parent.origin" semantic conventions. It represents the determines + // whether the span has a parent span, and if so, + // [whether it is a remote parent]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") + + // OTelSpanSamplingResultKey is the attribute Key conforming to the + // "otel.span.sampling_result" semantic conventions. It represents the result + // value of the sampler for this span. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") + + // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" + // semantic conventions. It represents the name of the code, either "OK" or + // "ERROR". MUST NOT be set if the status code is UNSET. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the description + // of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "resource not found" + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +// OTelComponentName returns an attribute KeyValue conforming to the +// "otel.component.name" semantic conventions. It represents a name uniquely +// identifying the instance of the OpenTelemetry component within its containing +// SDK instance. +func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). 
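+//
+// Example (editor's sketch, not part of the generated file): a typical scope
+// name is the instrumentation library's import path; the value below is
+// illustrative.
+//
+//	attr := OTelScopeName("go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp")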
+func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeSchemaURL returns an attribute KeyValue conforming to the +// "otel.scope.schema_url" semantic conventions. It represents the schema URL of +// the instrumentation scope. +func OTelScopeSchemaURL(val string) attribute.KeyValue { + return OTelScopeSchemaURLKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // Zipkin span exporter over HTTP + // + // Stability: development + OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with 
protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter") + // OTLP metric exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") + // Prometheus metric exporter over HTTP with the default text-based format + // + // Stability: development + OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") +) + +// Enum values for otel.span.parent.origin +var ( + // The span does not have a parent, it is a root span + // Stability: development + OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") + // The span has a parent and the parent's span context [isRemote()] is false + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local") + // The span has a parent and the parent's span context [isRemote()] is true + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote") +) + +// Enum values for otel.span.sampling_result +var ( + // The span is not sampled and not recording + // Stability: development + OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") + // The span is not sampled, but recording + // Stability: development + OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") + // The span is sampled and recording + // Stability: development + OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") +) + +// Enum values for otel.status_code +var ( + // The operation has been validated by an Application developer or Operator to + // have completed successfully. + // Stability: stable + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error. + // Stability: stable + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// Namespace: peer +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic + // conventions. It represents the [`service.name`] of the remote service. SHOULD + // be equal to the actual `service.name` resource attribute of the remote + // service if any. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: AuthTokenCache + // + // [`service.name`]: /docs/resource/README.md#service + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the "peer.service" +// semantic conventions. It represents the [`service.name`] of the remote +// service. SHOULD be equal to the actual `service.name` resource attribute of +// the remote service if any. +// +// [`service.name`]: /docs/resource/README.md#service +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// Namespace: process +const ( + // ProcessArgsCountKey is the attribute Key conforming to the + // "process.args_count" semantic conventions. It represents the length of the + // process.command_args array. 
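+ //
+ // Example (editor's sketch, not part of the generated file): keeping
+ // `process.args_count` consistent with `process.command_args`. Only do this
+ // after sanitizing sensitive arguments (see `process.command_args` below).
+ //
+ //	attrs := []attribute.KeyValue{
+ //		ProcessCommandArgs(os.Args...),
+ //		ProcessArgsCount(len(os.Args)),
+ //	}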
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 4 + // Note: This field can be useful for querying or performing bucket analysis on + // how many arguments were provided to start a process. More arguments may be an + // indication of suspicious activity. + ProcessArgsCountKey = attribute.Key("process.args_count") + + // ProcessCommandKey is the attribute Key conforming to the "process.command" + // semantic conventions. It represents the command used to launch the process + // (i.e. the command name). On Linux based systems, can be set to the zeroth + // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter + // extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otelcol" + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received by + // the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this + // would be the full argv vector passed to `main`. SHOULD NOT be collected by + // default unless there is sanitization that excludes sensitive data. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otelcol", "--config=config.yaml" + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full command + // used to launch the process as a single string. + // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if + // you have to assemble it just for monitoring; use `process.command_args` + // instead. SHOULD NOT be collected by default unless there is sanitization that + // excludes sensitive data. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "C:\cmd\otelcol --config="my directory\config.yaml"" + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It specifies whether the + // context switches for this data point were voluntary or involuntary. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and time + // the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:25:34.853Z" + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the + // "process.executable.build_id.gnu" semantic conventions. It represents the GNU + // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" + ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") + + // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the + // "process.executable.build_id.go" semantic conventions. It represents the Go + // build ID as retrieved by `go tool buildid `. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" + ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") + + // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the + // "process.executable.build_id.htlhash" semantic conventions. It represents the + // profiling specific build ID for executables. See the OTel specification for + // Profiles for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "600DCAFE4A110000F2BF38C493F5FB92" + ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name of the + // process executable. On Linux based systems, this SHOULD be set to the base + // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to + // the base name of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcol" + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full path + // to the process executable. On Linux based systems, can be set to the target + // of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/cmd/otelcol" + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" + // semantic conventions. It represents the exit code of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" + // semantic conventions. It represents the date and time the process exited, in + // ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:26:12.315Z" + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID of the + // process's group leader. This is also the process group ID (PGID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. 
It represents the whether the + // process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessLinuxCgroupKey is the attribute Key conforming to the + // "process.linux.cgroup" semantic conventions. It represents the control group + // associated with the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", + // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" + // Note: Control groups (cgroups) are a kernel feature used to organize and + // manage process resources. This attribute provides the path(s) to the + // cgroup(s) associated with the process, which should match the contents of the + // [/proc/[PID]/cgroup] file. + // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. 
It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14.0.2 + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved user ID + // (SUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the username of + // the saved user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID of + // the process's session leader. This is also the session ID (SID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessTitleKey is the attribute Key conforming to the "process.title" + // semantic conventions. It represents the process title (proctitle). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cat /etc/hostname", "xfce4-session", "bash" + // Note: In many Unix-like systems, process title (proctitle), is the string + // that represents the name or command line of a running process, displayed by + // system monitoring tools like ps, top, and htop. + ProcessTitleKey = attribute.Key("process.title") + + // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" + // semantic conventions. It represents the effective user ID (EUID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" + // semantic conventions. It represents the username of the effective user of the + // process. 
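+ //
+ // Example (editor's sketch, not part of the generated file): capturing the
+ // real and effective user IDs of the current process with the os package.
+ //
+ //	attrs := []attribute.KeyValue{
+ //		ProcessRealUserID(os.Getuid()),
+ //		ProcessUserID(os.Geteuid()),
+ //	}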
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic + // conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily unique + // across all processes on the host but it is unique within the process + // namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") + + // ProcessWorkingDirectoryKey is the attribute Key conforming to the + // "process.working_directory" semantic conventions. It represents the working + // directory of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/root" + ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") +) + +// ProcessArgsCount returns an attribute KeyValue conforming to the +// "process.args_count" semantic conventions. It represents the length of the +// process.command_args array. +func ProcessArgsCount(val int) attribute.KeyValue { + return ProcessArgsCountKey.Int(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be set +// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the +// first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the command +// arguments (including the command/executable itself) as received by the +// process. On Linux-based systems (and some other Unixoid systems supporting +// procfs), can be set according to the list of null-delimited strings extracted +// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full +// argv vector passed to `main`. SHOULD NOT be collected by default unless there +// is sanitization that excludes sensitive data. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if +// you have to assemble it just for monitoring; use `process.command_args` +// instead. SHOULD NOT be collected by default unless there is sanitization that +// excludes sensitive data. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and time +// the process was created, in ISO 8601 format. 
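+//
+// Example (editor's sketch, not part of the generated file): RFC 3339 is a
+// profile of ISO 8601, so time.RFC3339 output is acceptable here; startTime
+// is an illustrative time.Time.
+//
+//	attr := ProcessCreationTime(startTime.UTC().Format(time.RFC3339))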
+func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the +// "process.environment_variable" semantic conventions. It represents the process +// environment variables, `` being the environment variable name, the value +// being the environment variable value. +func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { + return attribute.String("process.environment_variable."+key, val) +} + +// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the +// "process.executable.build_id.gnu" semantic conventions. It represents the GNU +// build ID as found in the `.note.gnu.build-id` ELF section (hex string). +func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGNUKey.String(val) +} + +// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the +// "process.executable.build_id.go" semantic conventions. It represents the Go +// build ID as retrieved by `go tool buildid `. +func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGoKey.String(val) +} + +// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to +// the "process.executable.build_id.htlhash" semantic conventions. It represents +// the profiling specific build ID for executables. See the OTel specification +// for Profiles for more information. +func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { + return ProcessExecutableBuildIDHtlhashKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of the +// process executable. On Linux based systems, this SHOULD be set to the base +// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the +// base name of `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path to +// the process executable. On Linux based systems, can be set to the target of +// `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time the +// process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of the +// process's group leader. This is also the process group ID (PGID) of the +// process. 
+func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessLinuxCgroup returns an attribute KeyValue conforming to the +// "process.linux.cgroup" semantic conventions. It represents the control group +// associated with the process. +func ProcessLinuxCgroup(val string) attribute.KeyValue { + return ProcessLinuxCgroupKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" +// semantic conventions. It represents the username of the user that owns the +// process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. +func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. 
+func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. It represents the working +// directory of the process. +func ProcessWorkingDirectory(val string) attribute.KeyValue { + return ProcessWorkingDirectoryKey.String(val) +} + +// Enum values for process.context_switch_type +var ( + // voluntary + // Stability: development + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + // Stability: development + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +// Enum values for process.paging.fault_type +var ( + // major + // Stability: development + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + // Stability: development + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// Namespace: profile +const ( + // ProfileFrameTypeKey is the attribute Key conforming to the + // "profile.frame.type" semantic conventions. It describes the + // interpreter or compiler of a single frame. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpython" + ProfileFrameTypeKey = attribute.Key("profile.frame.type") +) + +// Enum values for profile.frame.type +var ( + // [.NET] + // + // Stability: development + // + // [.NET]: https://wikipedia.org/wiki/.NET + ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") + // [JVM] + // + // Stability: development + // + // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine + ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") + // [Kernel] + // + // Stability: development + // + // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) + ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") + // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a + // more precise value MUST be used.
+ // + // Stability: development + // + // [C]: https://wikipedia.org/wiki/C_(programming_language) + // [C++]: https://wikipedia.org/wiki/C%2B%2B + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") + // [Perl] + // + // Stability: development + // + // [Perl]: https://wikipedia.org/wiki/Perl + ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") + // [PHP] + // + // Stability: development + // + // [PHP]: https://wikipedia.org/wiki/PHP + ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") + // [Python] + // + // Stability: development + // + // [Python]: https://wikipedia.org/wiki/Python_(programming_language) + ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") + // [Ruby] + // + // Stability: development + // + // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) + ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") + // [V8JS] + // + // Stability: development + // + // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) + ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") + // [Erlang] + // + // Stability: development + // + // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) + ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") + // [Go], + // + // Stability: development + // + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") + // [Rust] + // + // Stability: development + // + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") +) + +// Namespace: rpc +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes] of the Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [error codes]: https://connectrpc.com//docs/protocol/#error-codes + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the + // [numeric status code] of the gRPC request. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` + // property of response if it is an error response. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -32700, 100 + RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
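+ //
+ // Example (editor's sketch, not part of the generated file): recording a
+ // JSON-RPC parse error (-32700 is the JSON-RPC 2.0 parse-error code) on a
+ // span for an error response.
+ //
+ //	span.SetAttributes(
+ //		RPCJSONRPCErrorCode(-32700),
+ //		RPCJSONRPCErrorMessageKey.String("Parse error"),
+ //	)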
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Parse error", "User already exists"
+ RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJSONRPCRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int, string,
+ // `null` or missing (for notifications), value is expected to be cast to string
+ // for simplicity. Use empty string in case of `null` value. Omit entirely if
+ // this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10", "request-7", ""
+ RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJSONRPCVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0", "1.0"
+ RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+ // conventions. It represents the name of the (logical) method being called; it
+ // must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: exampleMethod
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function.
The `code.function.name` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+ // conventions. It represents the full (logical) name of the service being
+ // called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myservice.EchoService
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+ // conventions. It represents a string identifying the remoting system. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the
+// connect request metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val)
+}
+
+// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the
+// connect response metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val)
+}
+
+// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC
+// request metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.request.metadata."+key, val)
+}
+
+// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC
+// response metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.response.metadata."+key, val)
+}
+
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of response if it is an error response.
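+//
+// Editor's sketch (not part of the generated documentation): on a failed
+// JSON-RPC call, the standard parse error could be recorded on a span as
+//
+//	span.SetAttributes(
+//		RPCJSONRPCErrorCode(-32700),
+//		RPCJSONRPCErrorMessage("Parse error"),
+//	)
+//
+// where `span` is an assumed trace.Span obtained from the active context.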
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue {
+ return RPCJSONRPCErrorCodeKey.Int(val)
+}
+
+// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJSONRPCErrorMessage(val string) attribute.KeyValue {
+ return RPCJSONRPCErrorMessageKey.String(val)
+}
+
+// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property
+// of request or response. Since protocol allows id to be int, string, `null` or
+// missing (for notifications), value is expected to be cast to string for
+// simplicity. Use empty string in case of `null` value. Omit entirely if this is
+// a notification.
+func RPCJSONRPCRequestID(val string) attribute.KeyValue {
+ return RPCJSONRPCRequestIDKey.String(val)
+}
+
+// RPCJSONRPCVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version
+// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't
+// specify this, the value can be omitted.
+func RPCJSONRPCVersion(val string) attribute.KeyValue {
+ return RPCJSONRPCVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters starting
+// from `1`, one for sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
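+//
+// A hedged usage sketch: instrumenting a call to the logical service
+// myservice.EchoService might record
+//
+//	span.SetAttributes(
+//		RPCService("myservice.EchoService"),
+//		RPCMethod("exampleMethod"),
+//	)
+//
+// with `span` an assumed trace.Span; the names are illustrative only.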
+func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// Enum values for rpc.connect_rpc.error_code +var ( + // cancelled + // Stability: development + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + // Stability: development + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + // Stability: development + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + // Stability: development + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + // Stability: development + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + // Stability: development + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + // Stability: development + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + // Stability: development + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + // Stability: development + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + // Stability: development + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + // Stability: development + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + // Stability: development + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + // Stability: development + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + // Stability: development + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + // Stability: development + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + // Stability: development + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +// Enum values for rpc.grpc.status_code +var ( + // OK + // Stability: development + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + // Stability: development + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + // Stability: development + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // 
OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + // Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. 
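+ //
+ // As an editor's illustration only, an IDS alert event might combine this
+ // with the rule name:
+ //
+ //	span.SetAttributes(
+ //		SecurityRuleName("BLOCK_DNS_over_TLS"),
+ //		SecurityRuleReference("https://en.wikipedia.org/wiki/DNS_over_TLS"),
+ //	)
+ //
+ // where `span` is an assumed trace.Span.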
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. +func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. 
It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// Namespace: service +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID of + // the service instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "627cc493-f310-47de-96bd-71410b7dec09" + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service). 
+ //
+ // Implementations, such as SDKs, are recommended to generate a random Version 1
+ // or Version 4 [RFC 4122] UUID, but are free to use an inherent unique ID as
+ // the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be used as
+ // source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the purposes of
+ // identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`] file, the underlying
+ // data, such as pod name and namespace, should be treated as confidential,
+ // being the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we do
+ // not recommend using one identifier
+ // for all processes participating in the application. Instead, it's recommended
+ // that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it can't
+ // unambiguously determine the
+ // service instance that is generating that telemetry. For instance, creating a
+ // UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container within
+ // that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers, as
+ // they know the target address and
+ // port.
+ //
+ // [RFC 4122]: https://www.ietf.org/rfc/rfc4122.txt
+ // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name" semantic
+ // conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "shoppingcart"
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fall back to `unknown_service:`
+ // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`.
+ // If `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ //
+ // [`process.executable.name`]: process.md
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Shop"
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name` is
+ // expected to be unique for all services that have no explicit namespace
+ // defined (so the empty/unspecified namespace is simply one more valid
+ // namespace). Zero-length namespace string is assumed equal to unspecified
+ // namespace.
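+ //
+ // For example (an editor's sketch, names are illustrative), two teams can
+ // each run a `shoppingcart` service without colliding by qualifying it:
+ //
+ //	attrs := []attribute.KeyValue{
+ //		ServiceNamespace("Shop"),
+ //		ServiceName("shoppingcart"),
+ //	}
+ //
+ // which would typically be attached to the SDK resource.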
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the "service.version"
+ // semantic conventions. It represents the version string of the service API or
+ // implementation. The format is not defined by these conventions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "2.0.0", "a01dbef8a"
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of the
+// service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the "service.name"
+// semantic conventions. It represents the logical name of the service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
+// Namespace: session
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id" semantic
+ // conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
+
+// Namespace: signalr
+const (
+ // SignalRConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the SignalR
+ // HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "app_shutdown", "timeout"
+ SignalRConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalRTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the
+ // [SignalR transport type].
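+ //
+ // For instance (editor's note), a hub connection negotiated over WebSockets
+ // would be recorded with the enum value defined further below:
+ //
+ //	span.SetAttributes(SignalRTransportWebSockets)
+ //
+ // where `span` is an assumed trace.Span.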
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "web_sockets", "long_polling"
+ //
+ // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+ SignalRTransportKey = attribute.Key("signalr.transport")
+)
+
+// Enum values for signalr.connection.status
+var (
+ // The connection was closed normally.
+ // Stability: stable
+ SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout.
+ // Stability: stable
+ SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down.
+ // Stability: stable
+ SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown")
+)
+
+// Enum values for signalr.transport
+var (
+ // ServerSentEvents protocol
+ // Stability: stable
+ SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ // Stability: stable
+ SignalRTransportLongPolling = SignalRTransportKey.String("long_polling")
+ // WebSockets protocol
+ // Stability: stable
+ SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets")
+)
+
+// Namespace: source
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the destination side, and when communicating through
+ // an intermediary, `source.address` SHOULD represent the source address behind
+ // any intermediaries, for example proxies, if it's available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port" semantic
+ // conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the "source.address"
+// semantic conventions. It represents the source address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Namespace: system
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It is deprecated; use
+ // `cpu.logical_number` instead.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "(identifier)" + SystemDeviceKey = attribute.Key("system.device") + + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the filesystem + // mode. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "rw, ro" + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/mnt/data" + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the filesystem + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "used" + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the filesystem + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ext4" + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") + + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free", "cached" + SystemMemoryStateKey = attribute.Key("system.memory.state") + + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "in" + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory paging + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free" + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory paging + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "minor" + SystemPagingTypeKey = attribute.Key("system.paging.type") + + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State Codes]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "running" + // + // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. 
It is
+// deprecated; use `cpu.logical_number` instead.
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+ // used
+ // Stability: development
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ // Stability: development
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ // Stability: development
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+ // fat32
+ // Stability: development
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ // Stability: development
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ // Stability: development
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ // Stability: development
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ // Stability: development
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ // Stability: development
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.state
+var (
+ // Actual used virtual memory in bytes.
+ // Stability: development + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + // Stability: development + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. 
It represents the fully qualified human readable name
+ // of the [test case].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1",
+ // "ExampleTestCase1_test1"
+ //
+ // [test case]: https://wikipedia.org/wiki/Test_case
+ TestCaseNameKey = attribute.Key("test.case.name")
+
+ // TestCaseResultStatusKey is the attribute Key conforming to the
+ // "test.case.result.status" semantic conventions. It represents the status of
+ // the actual test case result from test execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pass", "fail"
+ TestCaseResultStatusKey = attribute.Key("test.case.result.status")
+
+ // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name"
+ // semantic conventions. It represents the human readable name of a
+ // [test suite].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TestSuite1"
+ //
+ // [test suite]: https://wikipedia.org/wiki/Test_suite
+ TestSuiteNameKey = attribute.Key("test.suite.name")
+
+ // TestSuiteRunStatusKey is the attribute Key conforming to the
+ // "test.suite.run.status" semantic conventions. It represents the status of the
+ // test suite run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "skipped", "aborted", "timed_out",
+ // "in_progress"
+ TestSuiteRunStatusKey = attribute.Key("test.suite.run.status")
+)
+
+// TestCaseName returns an attribute KeyValue conforming to the "test.case.name"
+// semantic conventions. It represents the fully qualified human readable name of
+// the [test case].
+//
+// [test case]: https://wikipedia.org/wiki/Test_case
+func TestCaseName(val string) attribute.KeyValue {
+ return TestCaseNameKey.String(val)
+}
+
+// TestSuiteName returns an attribute KeyValue conforming to the
+// "test.suite.name" semantic conventions. It represents the human readable name
+// of a [test suite].
+//
+// [test suite]: https://wikipedia.org/wiki/Test_suite
+func TestSuiteName(val string) attribute.KeyValue {
+ return TestSuiteNameKey.String(val)
+}
+
+// Enum values for test.case.result.status
+var (
+ // pass
+ // Stability: development
+ TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass")
+ // fail
+ // Stability: development
+ TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail")
+)
+
+// Enum values for test.suite.run.status
+var (
+ // success
+ // Stability: development
+ TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success")
+ // failure
+ // Stability: development
+ TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure")
+ // skipped
+ // Stability: development
+ TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped")
+ // aborted
+ // Stability: development
+ TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted")
+ // timed_out
+ // Stability: development
+ TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out")
+ // in_progress
+ // Stability: development
+ TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress")
+)
+
+// Namespace: thread
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed to OS
+ // thread ID).
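+ //
+ // Editor's note: Go itself does not expose a "managed" thread ID, so a
+ // sketch might record an application-level worker index instead:
+ //
+ //	span.SetAttributes(ThreadID(workerIndex))
+ //
+ // where `workerIndex` is an assumed int maintained by the caller.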
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic
+ // conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: main
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic
+// conventions. It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Namespace: tls
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+ // conventions. It represents the string indicating the [cipher] used during the
+ // current connection.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+ // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+ // of the [registered TLS Cipher Suites].
+ //
+ // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+ // [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the client. This is usually
+ // mutually-exclusive of `client.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // client. This is usually mutually-exclusive of `client.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions.
It represents the certificate
+ // fingerprint using the SHA1 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the date/time
+ // indicating when client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the distinguished
+ // name of subject of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com"
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the array
+ // of ciphers offered by the client during the client hello.
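+ //
+ // A hedged sketch of populating this from Go's crypto/tls: inside a
+ // tls.Config.GetConfigForClient callback, the offered suites can be named
+ // with tls.CipherSuiteName and attached via the key's StringSlice method:
+ //
+ //	names := make([]string, 0, len(hello.CipherSuites))
+ //	for _, id := range hello.CipherSuites {
+ //		names = append(names, tls.CipherSuiteName(id))
+ //	}
+ //	attr := TLSClientSupportedCiphersKey.StringSlice(names)
+ //
+ // Here `hello` is the assumed *tls.ClientHelloInfo passed to the callback.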
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." 
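+ //
+ // Editorial sketch (not generated content): a typical way to attach the tls.*
+ // attributes defined here to a span. `span` is assumed to be an existing
+ // trace.Span and `state` a crypto/tls ConnectionState; both names are
+ // hypothetical, chosen for illustration.
+ //
+ //    span.SetAttributes(
+ //        TLSEstablished(true),
+ //        TLSCipher(tls.CipherSuiteName(state.CipherSuite)),
+ //        TLSNextProtocol(state.NegotiatedProtocol),
+ //        TLSResumed(state.DidResume),
+ //    )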
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // server. This is usually mutually-exclusive of `server.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+ // semantic conventions. It represents a hash that identifies servers based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. 
It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also exists
+// in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually-exclusive of `server.certificate` since that
+// value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions.
It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. 
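+ //
+ // Editorial sketch (not generated content), assuming `u` is a *url.URL from
+ // net/url: credentials can be redacted in the form the note below requires
+ // before the value is recorded.
+ //
+ //    if u.User != nil {
+ //        u.User = url.UserPassword("REDACTED", "REDACTED")
+ //    }
+ //    kv := URLFull(u.String())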
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI query] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "q=OpenTelemetry" + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `q=OpenTelemetry&sig=REDACTED`. + // + // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.com", "foo.co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // For example, the registered domain for `foo.example.com` is `example.com`. + // Trying to approximate this by simply taking the last two labels will not work + // well for TLDs such as `co.uk`. + // + // [public suffix list]: https://publicsuffix.org/ + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic + // conventions. It represents the [URI scheme] component identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https", "ftp", "telnet" + // + // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name under + // the registered_domain. In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain contains + // all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "east", "sub2.sub1" + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the + // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the + // subdomain field should contain `sub2.sub1`, with no trailing period. 
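+ //
+ // Editorial sketch (not generated content): most sibling url.* attributes can
+ // be derived with net/url, though the subdomain itself requires a public
+ // suffix list and cannot be computed from the URL string alone.
+ //
+ //    u, err := url.Parse("https://east.mydomain.co.uk:8443/search?q=x")
+ //    if err == nil {
+ //        attrs := []attribute.KeyValue{
+ //            URLScheme(u.Scheme),
+ //            URLDomain(u.Hostname()),
+ //            URLPath(u.Path),
+ //            URLQuery(u.RawQuery),
+ //        }
+ //        if p, perr := strconv.Atoi(u.Port()); perr == nil {
+ //            attrs = append(attrs, URLPort(p))
+ //        }
+ //        _ = attrs
+ //    }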
+ URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" semantic + // conventions. It represents the low-cardinality template of an + // [absolute path reference]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/users/{id}", "/users/:id", "/users?id={id}" + // + // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective top + // level domain (eTLD), also known as the domain suffix, is the last part of the + // domain name. For example, the top level domain for example.com is `com`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com", "co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // + // [public suffix list]: https://publicsuffix.org/ + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the `url.full`, +// such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the "url.extension" +// semantic conventions. It represents the file extension extracted from the +// `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the "url.fragment" +// semantic conventions. It represents the [URI fragment] component. +// +// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" semantic +// conventions. It represents the absolute URL describing a network resource +// according to [RFC3986]. +// +// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the "url.original" +// semantic conventions. It represents the unmodified original URL as seen in the +// event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" semantic +// conventions. It represents the [URI path] component. +// +// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" semantic +// conventions. It represents the port extracted from the `url.full`. +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic +// conventions. It represents the [URI query] component. 
+// +// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI scheme] component identifying the +// used protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" +// semantic conventions. It represents the subdomain portion of a fully qualified +// domain name includes all of the names except the host name under the +// registered_domain. In a partially qualified domain, or if the qualification +// level of the full name cannot be determined, subdomain contains all of the +// names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the "url.template" +// semantic conventions. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of the +// domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Namespace: user +const ( + // UserEmailKey is the attribute Key conforming to the "user.email" semantic + // conventions. It represents the user email address. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein@example.com" + UserEmailKey = attribute.Key("user.email") + + // UserFullNameKey is the attribute Key conforming to the "user.full_name" + // semantic conventions. It represents the user's full name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Albert Einstein" + UserFullNameKey = attribute.Key("user.full_name") + + // UserHashKey is the attribute Key conforming to the "user.hash" semantic + // conventions. It represents the unique user hash to correlate information for + // a user in anonymized form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" + // Note: Useful if `user.id` or `user.name` contain confidential information and + // cannot be used. + UserHashKey = attribute.Key("user.hash") + + // UserIDKey is the attribute Key conforming to the "user.id" semantic + // conventions. It represents the unique identifier of the user. 
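+ //
+ // Editorial sketch (not generated content): the user.* constructors below
+ // combine naturally when enriching a span with authenticated-user context;
+ // `span` is a hypothetical trace.Span.
+ //
+ //    span.SetAttributes(
+ //        UserID("S-1-5-21-202424912787-2692429404-2351956786-1000"),
+ //        UserName("a.einstein"),
+ //        UserRoles("admin", "reporting_user"),
+ //    )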
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions. It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. +func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. 
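+ //
+ // Editorial sketch (not generated content): in an HTTP server this value is
+ // typically taken straight from the request, e.g. with `r` a *http.Request:
+ //
+ //    kv := UserAgentOriginal(r.UserAgent())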
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2"
+ //
+ // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentOSNameKey is the attribute Key conforming to the
+ // "user_agent.os.name" semantic conventions. It represents the human readable
+ // operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ // Note: For mapping user agent strings to OS names, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+ // UserAgentOSVersionKey is the attribute Key conforming to the
+ // "user_agent.os.version" semantic conventions. It represents the version
+ // string of the operating system as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ // Note: For mapping user agent strings to OS versions, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+ // UserAgentSyntheticTypeKey is the attribute Key conforming to the
+ // "user_agent.synthetic.type" semantic conventions. It represents the category
+ // of synthetic traffic, such as tests or bots.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute MAY be derived from the contents of the
+ // `user_agent.original` attribute. Components that populate the attribute are
+ // responsible for determining what they consider to be synthetic bot or test
+ // traffic. This attribute can either be set for self-identification purposes,
+ // or on telemetry detected to be generated as a result of a synthetic request.
+ // This attribute is useful for distinguishing between genuine client traffic
+ // and synthetic traffic generated by bots or tests.
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of the
+ // user-agent extracted from original. Usually refers to the browser's version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.1.2", "1.0.0"
+ // Note: [Example] of extracting browser's version from original string. In the
+ // case of using a user-agent for non-browser products, such as microservices
+ // with multiple names/versions inside the `user_agent.original`, the most
+ // significant version SHOULD be selected. In such a scenario it should align
+ // with `user_agent.name`
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions.
It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. It represents the version string +// of the operating system as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). This title is often a brief summary + // of the change and may get merged in to a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "added", "removed" + VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") + + // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" + // semantic conventions. It represents the group owner within the version + // control system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org", "myteam", "business-unit" + VCSOwnerNameKey = attribute.Key("vcs.owner.name") + + // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" + // semantic conventions. It represents the name of the version control system + // provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "github", "gitlab", "gitea", "bitbucket" + VCSProviderNameKey = attribute.Key("vcs.provider.name") + + // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") + + // VCSRefBaseRevisionKey is the attribute Key conforming to the + // "vcs.ref.base.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. The + // revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.base.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") + + // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. 
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type")
+
+ // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name")
+
+ // VCSRefHeadRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.head.revision" semantic conventions. It represents the revision,
+ // literally [revised version], The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time. The revision can be a full [hash value (see
+ // glossary)],
+ // of the recorded change to a ref within a repository pointing to a
+ // commit [commit] object. It does
+ // not necessarily have to be a hash; it can simply define a [revision
+ // number]
+ // which is an integer that is monotonically increasing. In cases where
+ // it is identical to the `ref.head.name`, it SHOULD still be included.
+ // It is up to the implementer to decide which value to set as the
+ // revision based on the VCS system and situational context.
+ //
+ // [revised version]: https://www.merriam-webster.com/dictionary/revision
+ // [hash value (see
+ // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [commit]: https://git-scm.com/docs/git-commit
+ // [revision
+ // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision")
+
+ // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type")
+
+ // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic
+ // conventions. It represents the type of the [reference] in the repository.
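+ //
+ // Editorial sketch (not generated content): ref attributes typically travel
+ // together, e.g. for CI telemetry about the current checkout (`span` is a
+ // hypothetical trace.Span; the enum variables are defined near the end of
+ // this file):
+ //
+ //    span.SetAttributes(
+ //        VCSRefHeadName("my-feature-branch"),
+ //        VCSRefHeadTypeBranch,
+ //        VCSRefHeadRevision("9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc"),
+ //    )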
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged in to a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. 
+// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefBaseName(val string) attribute.KeyValue { + return VCSRefBaseNameKey.String(val) +} + +// VCSRefBaseRevision returns an attribute KeyValue conforming to the +// "vcs.ref.base.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefBaseRevision(val string) attribute.KeyValue { + return VCSRefBaseRevisionKey.String(val) +} + +// VCSRefHeadName returns an attribute KeyValue conforming to the +// "vcs.ref.head.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefHeadName(val string) attribute.KeyValue { + return VCSRefHeadNameKey.String(val) +} + +// VCSRefHeadRevision returns an attribute KeyValue conforming to the +// "vcs.ref.head.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefHeadRevision(val string) attribute.KeyValue { + return VCSRefHeadRevisionKey.String(val) +} + +// VCSRepositoryName returns an attribute KeyValue conforming to the +// "vcs.repository.name" semantic conventions. It represents the human readable +// name of the repository. It SHOULD NOT include any additional identifier like +// Group/SubGroup in GitLab or organization in GitHub. +func VCSRepositoryName(val string) attribute.KeyValue { + return VCSRepositoryNameKey.String(val) +} + +// VCSRepositoryURLFull returns an attribute KeyValue conforming to the +// "vcs.repository.url.full" semantic conventions. It represents the +// [canonical URL] of the repository providing the complete HTTP(S) address in +// order to locate and identify the repository through a browser. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +func VCSRepositoryURLFull(val string) attribute.KeyValue { + return VCSRepositoryURLFullKey.String(val) +} + +// Enum values for vcs.change.state +var ( + // Open means the change is currently active and under review. It hasn't been + // merged into the target branch yet, and it's still possible to make changes or + // add comments. + // Stability: development + VCSChangeStateOpen = VCSChangeStateKey.String("open") + // WIP (work-in-progress, draft) means the change is still in progress and not + // yet ready for a full review. It might still undergo significant changes. + // Stability: development + VCSChangeStateWip = VCSChangeStateKey.String("wip") + // Closed means the merge request has been closed without merging. This can + // happen for various reasons, such as the changes being deemed unnecessary, the + // issue being resolved in another way, or the author deciding to withdraw the + // request. + // Stability: development + VCSChangeStateClosed = VCSChangeStateKey.String("closed") + // Merged indicates that the change has been successfully integrated into the + // target codebase. 
+ // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. + // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. 
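+	//
+	// A sketch of typical use (assuming a span in scope to annotate):
+	//
+	//	span.SetAttributes(
+	//		semconv.WebEngineName("WildFly"),
+	//		semconv.WebEngineVersion("21.0.0"),
+	//	)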
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "WildFly"
+	WebEngineNameKey = attribute.Key("webengine.name")
+
+	// WebEngineVersionKey is the attribute Key conforming to the
+	// "webengine.version" semantic conventions. It represents the version of the
+	// web engine.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "21.0.0"
+	WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the "webengine.name"
+// semantic conventions. It represents the name of the web engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the web
+// engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
+
+// Namespace: zos
+const (
+	// ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic
+	// conventions. It represents the System Management Facility (SMF) Identifier
+	// that uniquely identifies a z/OS system within a SYSPLEX or mainframe
+	// environment and is used for system and performance analysis.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "SYS1"
+	ZOSSmfIDKey = attribute.Key("zos.smf.id")
+
+	// ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name"
+	// semantic conventions. It represents the name of the SYSPLEX to which the z/OS
+	// system belongs.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "SYSPLEX1"
+	ZOSSysplexNameKey = attribute.Key("zos.sysplex.name")
+)
+
+// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic
+// conventions. It represents the System Management Facility (SMF) Identifier
+// that uniquely identifies a z/OS system within a SYSPLEX or mainframe
+// environment and is used for system and performance analysis.
+func ZOSSmfID(val string) attribute.KeyValue {
+	return ZOSSmfIDKey.String(val)
+}
+
+// ZOSSysplexName returns an attribute KeyValue conforming to the
+// "zos.sysplex.name" semantic conventions. It represents the name of the SYSPLEX
+// to which the z/OS system belongs.
+func ZOSSysplexName(val string) attribute.KeyValue {
+	return ZOSSysplexNameKey.String(val)
+}
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
similarity index 65%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
index 0d1f55a8f..111010321 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
@@ -4,6 +4,6 @@
 // Package semconv implements OpenTelemetry semantic conventions.
 //
 // OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. 
This package represents the conventions -// as of the v1.20.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" +// patterns for OpenTelemetry things. This package represents the v1.37.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go new file mode 100644 index 000000000..666bded4b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return ErrorTypeOther + } + return ErrorTypeKey.String(value) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go similarity index 74% rename from vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go index f40c97825..e67469a4f 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go similarity index 85% rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/httpconv/metric.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go index 79843adbb..55bde895d 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/httpconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go @@ -1,5 +1,8 @@ // Code generated from semantic convention specification. DO NOT EDIT. +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + // Package httpconv provides types and functionality for OpenTelemetry semantic // conventions in the "http" namespace. package httpconv @@ -131,17 +134,14 @@ func (ClientActiveRequests) Description() string { return "Number of active HTTP requests." } -// Add adds incr to the existing count. +// Add adds incr to the existing count for attrs. // // The serverAddress is the server domain name if available without reverse DNS // lookup; otherwise, IP address or Unix domain socket name. // -// The serverPort is the port identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverPort is the server port number. // // All additional attrs passed are included in the recorded value. 
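+//
+// A minimal sketch, assuming a meter from the SDK and an instrument built
+// with this package's NewClientActiveRequests constructor:
+//
+//	reqs, _ := httpconv.NewClientActiveRequests(meter)
+//	reqs.Add(ctx, 1, "example.com", 443)
+//	defer reqs.Add(ctx, -1, "example.com", 443)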
-//
-// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
 func (m ClientActiveRequests) Add(
 	ctx context.Context,
 	incr int64,
@@ -149,6 +149,11 @@ func (m ClientActiveRequests) Add(
 	serverPort int,
 	attrs ...attribute.KeyValue,
 ) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
 	o := addOptPool.Get().(*[]metric.AddOption)
 	defer func() {
 		*o = (*o)[:0]
@@ -169,6 +174,23 @@ func (m ClientActiveRequests) Add(
 	m.Int64UpDownCounter.Add(ctx, incr, *o...)
 }
 
+// AddSet adds incr to the existing count for set.
+func (m ClientActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
 // AttrURLTemplate returns an optional attribute for the "url.template" semantic
 // convention. It represents the low-cardinality template of an
 // [absolute path reference].
@@ -244,17 +266,14 @@ func (ClientConnectionDuration) Description() string {
 	return "The duration of the successfully established outbound HTTP connections."
 }
 
-// Record records val to the current distribution.
+// Record records val to the current distribution for attrs.
 //
 // The serverAddress is the server domain name if available without reverse DNS
 // lookup; otherwise, IP address or Unix domain socket name.
 //
-// The serverPort is the port identifier of the ["URI origin"] HTTP request is
-// sent to.
+// The serverPort is the server port number.
 //
 // All additional attrs passed are included in the recorded value.
-//
-// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
 func (m ClientConnectionDuration) Record(
 	ctx context.Context,
 	val float64,
@@ -262,6 +281,11 @@ func (m ClientConnectionDuration) Record(
 	serverPort int,
 	attrs ...attribute.KeyValue,
 ) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
 	o := recOptPool.Get().(*[]metric.RecordOption)
 	defer func() {
 		*o = (*o)[:0]
@@ -282,6 +306,22 @@ func (m ClientConnectionDuration) Record(
 	m.Float64Histogram.Record(ctx, val, *o...)
 }
 
+// RecordSet records val to the current distribution for set.
+func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
 // AttrNetworkPeerAddress returns an optional attribute for the
 // "network.peer.address" semantic convention. It represents the peer address of
 // the network connection - IP address or Unix domain socket name.
@@ -356,7 +396,7 @@ func (ClientOpenConnections) Description() string {
 	return "Number of outbound HTTP connections that are currently active or idle on the client."
 }
 
-// Add adds incr to the existing count.
+// Add adds incr to the existing count for attrs.
 //
 // The connectionState is the state of the HTTP connection in the HTTP connection
 // pool.
@@ -364,12 +404,9 @@ func (ClientOpenConnections) Description() string {
 // The serverAddress is the server domain name if available without reverse DNS
 // lookup; otherwise, IP address or Unix domain socket name. 
// -// The serverPort is the port identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverPort is the server port number. // // All additional attrs passed are included in the recorded value. -// -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin func (m ClientOpenConnections) Add( ctx context.Context, incr int64, @@ -378,6 +415,11 @@ func (m ClientOpenConnections) Add( serverPort int, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + o := addOptPool.Get().(*[]metric.AddOption) defer func() { *o = (*o)[:0] @@ -399,6 +441,23 @@ func (m ClientOpenConnections) Add( m.Int64UpDownCounter.Add(ctx, incr, *o...) } +// AddSet adds incr to the existing count for set. +func (m ClientOpenConnections) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + // AttrNetworkPeerAddress returns an optional attribute for the // "network.peer.address" semantic convention. It represents the peer address of // the network connection - IP address or Unix domain socket name. @@ -472,21 +531,17 @@ func (ClientRequestBodySize) Description() string { return "Size of HTTP client request bodies." } -// Record records val to the current distribution. +// Record records val to the current distribution for attrs. // // The requestMethod is the HTTP request method. // -// The serverAddress is the host identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. // -// The serverPort is the port identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverPort is the server port number. // // All additional attrs passed are included in the recorded value. // -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin -// // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length] header. For requests using transport encoding, this should be @@ -501,6 +556,11 @@ func (m ClientRequestBodySize) Record( serverPort int, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + o := recOptPool.Get().(*[]metric.RecordOption) defer func() { *o = (*o)[:0] @@ -522,6 +582,29 @@ func (m ClientRequestBodySize) Record( m.Int64Histogram.Record(ctx, val, *o...) } +// RecordSet records val to the current distribution for set. +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. 
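+//
+// A sketch of the intended pattern (assumptions: semconv attribute helpers
+// are available and bodySize is a placeholder for the measured value; the
+// set is built once, outside the request hot path):
+//
+//	set := attribute.NewSet(
+//		semconv.HTTPRequestMethodGet,
+//		semconv.ServerAddress("example.com"),
+//	)
+//	m.RecordSet(ctx, bodySize, set)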
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
 // AttrErrorType returns an optional attribute for the "error.type" semantic
 // convention. It represents the describes a class of error the operation ended
 // with.
@@ -622,20 +705,16 @@ func (ClientRequestDuration) Description() string {
 	return "Duration of HTTP client requests."
 }
 
-// Record records val to the current distribution.
+// Record records val to the current distribution for attrs.
 //
 // The requestMethod is the HTTP request method.
 //
-// The serverAddress is the host identifier of the ["URI origin"] HTTP request is
-// sent to.
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
 //
-// The serverPort is the port identifier of the ["URI origin"] HTTP request is
-// sent to.
+// The serverPort is the server port number.
 //
 // All additional attrs passed are included in the recorded value.
-//
-// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
-// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
 func (m ClientRequestDuration) Record(
 	ctx context.Context,
 	val float64,
@@ -644,6 +723,11 @@ func (m ClientRequestDuration) Record(
 	serverPort int,
 	attrs ...attribute.KeyValue,
 ) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
 	o := recOptPool.Get().(*[]metric.RecordOption)
 	defer func() {
 		*o = (*o)[:0]
@@ -665,6 +749,22 @@ func (m ClientRequestDuration) Record(
 	m.Float64Histogram.Record(ctx, val, *o...)
 }
 
+// RecordSet records val to the current distribution for set.
+func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
 // AttrErrorType returns an optional attribute for the "error.type" semantic
 // convention. It represents the describes a class of error the operation ended
 // with.
@@ -765,21 +865,17 @@ func (ClientResponseBodySize) Description() string {
 	return "Size of HTTP client response bodies."
 }
 
-// Record records val to the current distribution.
+// Record records val to the current distribution for attrs.
 //
 // The requestMethod is the HTTP request method.
 //
-// The serverAddress is the host identifier of the ["URI origin"] HTTP request is
-// sent to.
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
 //
-// The serverPort is the port identifier of the ["URI origin"] HTTP request is
-// sent to.
+// The serverPort is the server port number.
 //
 // All additional attrs passed are included in the recorded value.
 //
-// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
-// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
-//
 // The size of the response payload body in bytes. 
This is the number of bytes
 // transferred excluding headers and is often, but not always, present as the
 // [Content-Length] header. For requests using transport encoding, this should be
@@ -794,6 +890,11 @@ func (m ClientResponseBodySize) Record(
 	serverPort int,
 	attrs ...attribute.KeyValue,
 ) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
 	o := recOptPool.Get().(*[]metric.RecordOption)
 	defer func() {
 		*o = (*o)[:0]
@@ -815,6 +916,29 @@ func (m ClientResponseBodySize) Record(
 	m.Int64Histogram.Record(ctx, val, *o...)
 }
 
+// RecordSet records val to the current distribution for set.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
 // AttrErrorType returns an optional attribute for the "error.type" semantic
 // convention. It represents the describes a class of error the operation ended
 // with.
@@ -915,7 +1039,7 @@ func (ServerActiveRequests) Description() string {
 	return "Number of active HTTP server requests."
 }
 
-// Add adds incr to the existing count.
+// Add adds incr to the existing count for attrs.
 //
 // The requestMethod is the HTTP request method.
 //
@@ -931,6 +1055,11 @@ func (m ServerActiveRequests) Add(
 	urlScheme string,
 	attrs ...attribute.KeyValue,
 ) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
 	o := addOptPool.Get().(*[]metric.AddOption)
 	defer func() {
 		*o = (*o)[:0]
@@ -951,6 +1080,23 @@ func (m ServerActiveRequests) Add(
 	m.Int64UpDownCounter.Add(ctx, incr, *o...)
 }
 
+// AddSet adds incr to the existing count for set.
+func (m ServerActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
 // AttrServerAddress returns an optional attribute for the "server.address"
 // semantic convention. It represents the name of the local HTTP server that
 // received the request.
@@ -1015,7 +1161,7 @@ func (ServerRequestBodySize) Description() string {
 	return "Size of HTTP server request bodies."
 }
 
-// Record records val to the current distribution.
+// Record records val to the current distribution for attrs.
 //
 // The requestMethod is the HTTP request method.
 //
@@ -1038,6 +1184,11 @@ func (m ServerRequestBodySize) Record(
 	urlScheme string,
 	attrs ...attribute.KeyValue,
 ) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
 	o := recOptPool.Get().(*[]metric.RecordOption)
 	defer func() {
 		*o = (*o)[:0]
@@ -1058,6 +1209,29 @@ func (m ServerRequestBodySize) Record(
 	m.Int64Histogram.Record(ctx, val, *o...)
 }
 
+// RecordSet records val to the current distribution for set.
+//
+// The size of the request payload body in bytes. 
This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
 // AttrErrorType returns an optional attribute for the "error.type" semantic
 // convention. It represents the describes a class of error the operation ended
 // with.
@@ -1168,7 +1342,7 @@ func (ServerRequestDuration) Description() string {
 	return "Duration of HTTP server requests."
 }
 
-// Record records val to the current distribution.
+// Record records val to the current distribution for attrs.
 //
 // The requestMethod is the HTTP request method.
 //
@@ -1184,6 +1358,11 @@ func (m ServerRequestDuration) Record(
 	urlScheme string,
 	attrs ...attribute.KeyValue,
 ) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
 	o := recOptPool.Get().(*[]metric.RecordOption)
 	defer func() {
 		*o = (*o)[:0]
@@ -1204,6 +1383,22 @@ func (m ServerRequestDuration) Record(
 	m.Float64Histogram.Record(ctx, val, *o...)
 }
 
+// RecordSet records val to the current distribution for set.
+func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
 // AttrErrorType returns an optional attribute for the "error.type" semantic
 // convention. It represents the describes a class of error the operation ended
 // with.
@@ -1314,7 +1509,7 @@ func (ServerResponseBodySize) Description() string {
 	return "Size of HTTP server response bodies."
 }
 
-// Record records val to the current distribution.
+// Record records val to the current distribution for attrs.
 //
 // The requestMethod is the HTTP request method.
 //
@@ -1337,6 +1532,11 @@ func (m ServerResponseBodySize) Record(
 	urlScheme string,
 	attrs ...attribute.KeyValue,
 ) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
 	o := recOptPool.Get().(*[]metric.RecordOption)
 	defer func() {
 		*o = (*o)[:0]
@@ -1357,6 +1557,29 @@ func (m ServerResponseBodySize) Record(
 	m.Int64Histogram.Record(ctx, val, *o...)
 }
 
+// RecordSet records val to the current distribution for set.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
 // AttrErrorType returns an optional attribute for the "error.type" semantic
 // convention. It represents the describes a class of error the operation ended
 // with.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
new file mode 100644
index 000000000..a78eafd1f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
@@ -0,0 +1,2126 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "otel" namespace.
+package otelconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It represents the describes a class of error the operation ended
+// with.
+type ErrorTypeAttr string
+
+var (
+	// ErrorTypeOther is a fallback error value to be used when the instrumentation
+	// doesn't define a custom value.
+	ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// ComponentTypeAttr is an attribute conforming to the otel.component.type
+// semantic conventions. It represents a name identifying the type of the
+// OpenTelemetry component.
+type ComponentTypeAttr string
+
+var (
+	// ComponentTypeBatchingSpanProcessor is the builtin SDK batching span
+	// processor.
+	ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor"
+	// ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor.
+	ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor"
+	// ComponentTypeBatchingLogProcessor is the builtin SDK batching log record
+	// processor.
+	ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor"
+	// ComponentTypeSimpleLogProcessor is the builtin SDK simple log record
+	// processor.
+	ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor"
+	// ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with
+	// protobuf serialization.
+	ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter"
+	// ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with
+	// protobuf serialization.
+	ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter"
+	// ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP
+	// with JSON serialization.
+	ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter"
+	// ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP.
+	ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter"
+	// ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC
+	// with protobuf serialization.
+	ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter"
+	// ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP
+	// with protobuf serialization. 
+ ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" + // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over + // HTTP with JSON serialization. + ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" + // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting + // metric reader. + ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" + // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" + // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" + // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" + // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric + // exporter over HTTP with the default text-based format. + ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" +) + +// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin +// semantic conventions. It represents the determines whether the span has a +// parent span, and if so, [whether it is a remote parent]. +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +type SpanParentOriginAttr string + +var ( + // SpanParentOriginNone is the span does not have a parent, it is a root span. + SpanParentOriginNone SpanParentOriginAttr = "none" + // SpanParentOriginLocal is the span has a parent and the parent's span context + // [isRemote()] is false. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginLocal SpanParentOriginAttr = "local" + // SpanParentOriginRemote is the span has a parent and the parent's span context + // [isRemote()] is true. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginRemote SpanParentOriginAttr = "remote" +) + +// SpanSamplingResultAttr is an attribute conforming to the +// otel.span.sampling_result semantic conventions. It represents the result value +// of the sampler for this span. +type SpanSamplingResultAttr string + +var ( + // SpanSamplingResultDrop is the span is not sampled and not recording. + SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" + // SpanSamplingResultRecordOnly is the span is not sampled, but recording. + SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" + // SpanSamplingResultRecordAndSample is the span is sampled and recording. + SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" +) + +// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code +// semantic conventions. It represents the gRPC status code of the last gRPC +// requests performed in scope of this export call. +type RPCGRPCStatusCodeAttr int64 + +var ( + // RPCGRPCStatusCodeOk is the OK. + RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0 + // RPCGRPCStatusCodeCancelled is the CANCELLED. + RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1 + // RPCGRPCStatusCodeUnknown is the UNKNOWN. 
+ RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2 + // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT. + RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3 + // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED. + RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4 + // RPCGRPCStatusCodeNotFound is the NOT_FOUND. + RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5 + // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS. + RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6 + // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED. + RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7 + // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED. + RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8 + // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION. + RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9 + // RPCGRPCStatusCodeAborted is the ABORTED. + RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10 + // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE. + RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11 + // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED. + RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12 + // RPCGRPCStatusCodeInternal is the INTERNAL. + RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13 + // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE. + RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14 + // RPCGRPCStatusCodeDataLoss is the DATA_LOSS. + RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15 + // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED. + RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16 +) + +// SDKExporterLogExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It +// represents the number of log records for which the export has finished, either +// successful or failed. +type SDKExporterLogExported struct { + metric.Int64Counter +} + +// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. +func NewSDKExporterLogExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterLogExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.log.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogExported{noop.Int64Counter{}}, err + } + return SDKExporterLogExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogExported) Name() string { + return "otel.sdk.exporter.log.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogExported) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogExported) Description() string { + return "The number of log records for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
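+//
+// A minimal sketch, assuming a meter from the SDK and a hypothetical batch
+// slice whose export just finished successfully:
+//
+//	exported, _ := otelconv.NewSDKExporterLogExported(meter)
+//	exported.Add(ctx, int64(len(batch)),
+//		exported.AttrComponentType(otelconv.ComponentTypeOtlpGRPCLogExporter),
+//		exported.AttrComponentName("otlp_grpc_log_exporter/0"),
+//	)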
+// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterLogInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. 
It +// represents the number of log records which were passed to the exporter, but +// that have not been exported yet (neither successful, nor failed). +type SDKExporterLogInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. +func NewSDKExporterLogInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterLogInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.log.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterLogInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogInflight) Name() string { + return "otel.sdk.exporter.log.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogInflight) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogInflight) Description() string { + return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
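+//
+// These Attr helpers pair naturally with AddSet; a sketch that builds the
+// set once and reuses it (inflight is a hypothetical instrument value):
+//
+//	set := attribute.NewSet(
+//		inflight.AttrComponentType(otelconv.ComponentTypeOtlpGRPCLogExporter),
+//		inflight.AttrComponentName("otlp_grpc_log_exporter/0"),
+//	)
+//	inflight.AddSet(ctx, 1, set)
+//	defer inflight.AddSet(ctx, -1, set)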
+func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointExported is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.exported" +// semantic conventions. It represents the number of metric data points for which +// the export has finished, either successful or failed. +type SDKExporterMetricDataPointExported struct { + metric.Int64Counter +} + +// NewSDKExporterMetricDataPointExported returns a new +// SDKExporterMetricDataPointExported instrument. +func NewSDKExporterMetricDataPointExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterMetricDataPointExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.metric_data_point.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + } + return SDKExporterMetricDataPointExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointExported) Name() string { + return "otel.sdk.exporter.metric_data_point.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointExported) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointExported) Description() string { + return "The number of metric data points for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
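+//
+// For example, a sketch of recording a failed export of numDataPoints
+// points (hypothetical variables; on success no error.type would be set):
+//
+//	m.Add(ctx, int64(numDataPoints),
+//		m.AttrComponentType(otelconv.ComponentTypeOtlpGRPCMetricExporter),
+//		m.AttrErrorType(otelconv.ErrorTypeOther),
+//	)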
+func (m SDKExporterMetricDataPointExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointInflight is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" +// semantic conventions. It represents the number of metric data points which +// were passed to the exporter, but that have not been exported yet (neither +// successful, nor failed). +type SDKExporterMetricDataPointInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterMetricDataPointInflight returns a new +// SDKExporterMetricDataPointInflight instrument. 
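+//
+// Construction follows the same pattern as the other instruments in this
+// package; a sketch, assuming a meter from the SDK:
+//
+//	inflight, err := otelconv.NewSDKExporterMetricDataPointInflight(meter)
+//	if err != nil {
+//		// The returned value still wraps a noop instrument and is safe to use.
+//	}
+//	inflight.Add(ctx, 1)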
+func NewSDKExporterMetricDataPointInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterMetricDataPointInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.metric_data_point.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterMetricDataPointInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointInflight) Name() string { + return "otel.sdk.exporter.metric_data_point.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointInflight) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointInflight) Description() string { + return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
+func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterOperationDuration is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. +// It represents the duration of exporting a batch of telemetry records. +type SDKExporterOperationDuration struct { + metric.Float64Histogram +} + +// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration +// instrument. +func NewSDKExporterOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKExporterOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "otel.sdk.exporter.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + } + return SDKExporterOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterOperationDuration) Name() string { + return "otel.sdk.exporter.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterOperationDuration) Description() string { + return "The duration of exporting a batch of telemetry records." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. 
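+//
+// A sketch of timing a single export call (doExport is hypothetical; the
+// instrument's unit is seconds):
+//
+//	start := time.Now()
+//	err := doExport(ctx)
+//	elapsed := time.Since(start).Seconds()
+//	if err != nil {
+//		m.Record(ctx, elapsed, m.AttrErrorType(otelconv.ErrorTypeOther))
+//	} else {
+//		m.Record(ctx, elapsed)
+//	}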
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) Record(
+	ctx context.Context,
+	val float64,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			attrs...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// This metric defines successful operations using the full success definitions
+// for [http]
+// and [grpc]. Anything else is defined as an unsuccessful operation. For
+// successful
+// operations, `error.type` MUST NOT be set. For unsuccessful export operations,
+// `error.type` MUST contain a relevant failure cause.
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrHTTPResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the HTTP status
+// code of the last HTTP request performed in scope of this export call.
+func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue {
+	return attribute.Int("http.response.status_code", val)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue {
+	return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+	return attribute.String("otel.component.type", string(val))
+}
+
+// AttrRPCGRPCStatusCode returns an optional attribute for the
+// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code
+// of the last gRPC request performed in scope of this export call.
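An illustrative aside: a sketch of timing an export with this histogram, setting error.type only on failure as the documentation above requires. The otelconv import path, the exportBatch stand-in, and the attribute values are assumptions:

package main

import (
	"context"
	"errors"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" // assumed import path
)

// exportBatch stands in for a real export call.
func exportBatch(context.Context) error { return errors.New("timeout") }

func main() {
	ctx := context.Background()
	dur, err := otelconv.NewSDKExporterOperationDuration(otel.Meter("exporter"))
	if err != nil {
		panic(err)
	}

	start := time.Now()
	exportErr := exportBatch(ctx)

	attrs := []attribute.KeyValue{
		dur.AttrComponentType(otelconv.ComponentTypeAttr("otlp_http_span_exporter")), // illustrative value
	}
	if exportErr != nil {
		// error.type is set only for unsuccessful operations.
		attrs = append(attrs, dur.AttrErrorType(otelconv.ErrorTypeAttr("timeout"))) // illustrative value
	}
	dur.Record(ctx, time.Since(start).Seconds(), attrs...)
}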
+func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue { + return attribute.Int64("rpc.grpc.status_code", int64(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It +// represents the number of spans for which the export has finished, either +// successful or failed. +type SDKExporterSpanExported struct { + metric.Int64Counter +} + +// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. +func NewSDKExporterSpanExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterSpanExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.span.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, err + } + return SDKExporterSpanExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanExported) Name() string { + return "otel.sdk.exporter.span.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanExported) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanExported) Description() string { + return "The number of spans for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with `rejected_spans`
+// ), rejected spans MUST count as failed and only non-rejected spans count as
+// success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Counter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue {
+	return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+	return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue {
+	return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue {
+	return attribute.Int("server.port", val)
+}
+
+// SDKExporterSpanInflight is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It
+// represents the number of spans which were passed to the exporter, but that
+// have not been exported yet (neither successful, nor failed).
+type SDKExporterSpanInflight struct {
+	metric.Int64UpDownCounter
+}
+
+// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument.
+func NewSDKExporterSpanInflight(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (SDKExporterSpanInflight, error) {
+	// Check if the meter is nil.
+ if m == nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.span.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterSpanInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanInflight) Name() string { + return "otel.sdk.exporter.span.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanInflight) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanInflight) Description() string { + return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKLogCreated is an instrument used to record metric values conforming to the +// "otel.sdk.log.created" semantic conventions. It represents the number of logs +// submitted to enabled SDK Loggers. +type SDKLogCreated struct { + metric.Int64Counter +} + +// NewSDKLogCreated returns a new SDKLogCreated instrument. +func NewSDKLogCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKLogCreated, error) { + // Check if the meter is nil. + if m == nil { + return SDKLogCreated{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.log.created", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKLogCreated{noop.Int64Counter{}}, err + } + return SDKLogCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKLogCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKLogCreated) Name() string { + return "otel.sdk.log.created" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKLogCreated) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKLogCreated) Description() string { + return "The number of logs submitted to enabled SDK Loggers." +} + +// Add adds incr to the existing count for attrs. +func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// SDKMetricReaderCollectionDuration is an instrument used to record metric +// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic +// conventions. It represents the duration of the collect operation of the metric +// reader. +type SDKMetricReaderCollectionDuration struct { + metric.Float64Histogram +} + +// NewSDKMetricReaderCollectionDuration returns a new +// SDKMetricReaderCollectionDuration instrument. +func NewSDKMetricReaderCollectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKMetricReaderCollectionDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "otel.sdk.metric_reader.collection.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + } + return SDKMetricReaderCollectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKMetricReaderCollectionDuration) Name() string { + return "otel.sdk.metric_reader.collection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKMetricReaderCollectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKMetricReaderCollectionDuration) Description() string { + return "The duration of the collect operation of the metric reader." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It +// represents the number of log records for which the processing has finished, +// either successful or failed. +type SDKProcessorLogProcessed struct { + metric.Int64Counter +} + +// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. +func NewSDKProcessorLogProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorLogProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.processor.log.processed", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorLogProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogProcessed) Name() string { + return "otel.sdk.processor.log.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogProcessed) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogProcessed) Description() string { + return "The number of log records for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. +func (m SDKProcessorLogProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. 
+func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Log Record Processors MUST use `queue_full` for log records +// dropped due to a full queue. +func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.capacity" semantic +// conventions. It represents the maximum number of log records the queue of a +// given instance of an SDK Log Record processor can hold. +type SDKProcessorLogQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity +// instrument. +func NewSDKProcessorLogQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.capacity", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueCapacity) Name() string { + return "otel.sdk.processor.log.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueCapacity) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueCapacity) Description() string { + return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold." 
+} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It +// represents the number of log records in the queue of a given instance of an +// SDK log processor. +type SDKProcessorLogQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. +func NewSDKProcessorLogQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.size", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueSize) Name() string { + return "otel.sdk.processor.log.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueSize) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueSize) Description() string { + return "The number of log records in the queue of a given instance of an SDK log processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.processed" semantic conventions. 
It +// represents the number of spans for which the processing has finished, either +// successful or failed. +type SDKProcessorSpanProcessed struct { + metric.Int64Counter +} + +// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed +// instrument. +func NewSDKProcessorSpanProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorSpanProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.processor.span.processed", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorSpanProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanProcessed) Name() string { + return "otel.sdk.processor.span.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanProcessed) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanProcessed) Description() string { + return "The number of spans for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a +// full queue. 
+func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.capacity" semantic +// conventions. It represents the maximum number of spans the queue of a given +// instance of an SDK span processor can hold. +type SDKProcessorSpanQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity +// instrument. +func NewSDKProcessorSpanQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.capacity", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueCapacity) Name() string { + return "otel.sdk.processor.span.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueCapacity) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueCapacity) Description() string { + return "The maximum number of spans the queue of a given instance of an SDK span processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
+func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions. +// It represents the number of spans in the queue of a given instance of an SDK +// span processor. +type SDKProcessorSpanQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize +// instrument. +func NewSDKProcessorSpanQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.size", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueSize) Name() string { + return "otel.sdk.processor.span.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueSize) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueSize) Description() string { + return "The number of spans in the queue of a given instance of an SDK span processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKSpanLive is an instrument used to record metric values conforming to the +// "otel.sdk.span.live" semantic conventions. It represents the number of created +// spans with `recording=true` for which the end operation has not been called +// yet. +type SDKSpanLive struct { + metric.Int64UpDownCounter +} + +// NewSDKSpanLive returns a new SDKSpanLive instrument. +func NewSDKSpanLive( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKSpanLive, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.span.live", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, err + } + return SDKSpanLive{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanLive) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanLive) Name() string { + return "otel.sdk.span.live" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanLive) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanLive) Description() string { + return "The number of created spans with `recording=true` for which the end operation has not been called yet." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m SDKSpanLive) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} + +// SDKSpanStarted is an instrument used to record metric values conforming to the +// "otel.sdk.span.started" semantic conventions. It represents the number of +// created spans. +type SDKSpanStarted struct { + metric.Int64Counter +} + +// NewSDKSpanStarted returns a new SDKSpanStarted instrument. +func NewSDKSpanStarted( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKSpanStarted, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanStarted{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.span.started", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKSpanStarted{noop.Int64Counter{}}, err + } + return SDKSpanStarted{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanStarted) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKSpanStarted) Name() string { + return "otel.sdk.span.started" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanStarted) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanStarted) Description() string { + return "The number of created spans." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrSpanParentOrigin returns an optional attribute for the +// "otel.span.parent.origin" semantic convention. It represents the determines +// whether the span has a parent span, and if so, [whether it is a remote parent] +// . +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue { + return attribute.String("otel.span.parent.origin", string(val)) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go similarity index 71% rename from vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go index 95d0210e3..f8a0b7044 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.20.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE index 261eeb9e9..f1aee0f11 100644 --- a/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go index f3aa39813..8763936a8 100644 --- a/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/internal/telemetry" ) @@ -39,7 +39,7 @@ type autoTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = autoTracerProvider{} -func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { +func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { cfg := NewTracerConfig(opts...) return autoTracer{ name: name, @@ -81,7 +81,7 @@ func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOpt // Expected to be implemented in eBPF. 
// //go:noinline -func (t *autoTracer) start( +func (*autoTracer) start( ctx context.Context, spanPtr *autoSpan, psc *SpanContext, diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 9c0b720a4..aea11a2b5 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -73,7 +73,7 @@ func (cfg *SpanConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. func (cfg *SpanConfig) StackTrace() bool { return cfg.stackTrace } @@ -154,7 +154,7 @@ func (cfg *EventConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. func (cfg *EventConfig) StackTrace() bool { return cfg.stackTrace } diff --git a/vendor/go.opentelemetry.io/otel/trace/hex.go b/vendor/go.opentelemetry.io/otel/trace/hex.go new file mode 100644 index 000000000..1cbef1d4b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/hex.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +const ( + // hexLU is a hex lookup table of the 16 lowercase hex digits. + // The character values of the string are indexed at the equivalent + // hexadecimal value they represent. This table efficiently encodes byte data + // into a string representation of hexadecimal. + hexLU = "0123456789abcdef" + + // hexRev is a reverse hex lookup table for lowercase hex digits. + // The table is efficiently decodes a hexadecimal string into bytes. + // Valid hexadecimal characters are indexed at their respective values. All + // other invalid ASCII characters are represented with '\xff'. + // + // The '\xff' character is used as invalid because no valid character has + // the upper 4 bits set. Meaning, an efficient validation can be performed + // over multiple character parsing by checking these bits remain zero. 
+ hexRev = "" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +) diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go index f663547b4..ff0f6eac6 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go @@ -52,7 +52,7 @@ func Map(key string, value ...Attr) Attr { return Attr{key, MapValue(value...)} } -// Equal returns if a is equal to b. +// Equal reports whether a is equal to b. func (a Attr) Equal(b Attr) bool { return a.Key == b.Key && a.Value.Equal(b.Value) } diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go index 7b1ae3c4e..bea56f2e7 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go @@ -22,7 +22,7 @@ func (tid TraceID) String() string { return hex.EncodeToString(tid[:]) } -// IsEmpty returns false if id contains at least one non-zero byte. +// IsEmpty reports whether the TraceID contains only zero bytes. func (tid TraceID) IsEmpty() bool { return tid == [traceIDSize]byte{} } @@ -50,7 +50,7 @@ func (sid SpanID) String() string { return hex.EncodeToString(sid[:]) } -// IsEmpty returns true if the span ID contains at least one non-zero byte. +// IsEmpty reports whether the SpanID contains only zero bytes. func (sid SpanID) IsEmpty() bool { return sid == [spanIDSize]byte{} } @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go index ae9ce102a..cb7927b81 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go @@ -257,10 +257,10 @@ func (v Value) Kind() ValueKind { } } -// Empty returns if v does not hold any value. +// Empty reports whether v does not hold any value. 
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } -// Equal returns if v is equal to w. +// Equal reports whether v is equal to w. func (v Value) Equal(w Value) bool { k1 := v.Kind() k2 := w.Kind() diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index 0f56e4dbb..400fab123 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -26,7 +26,7 @@ type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} // Tracer returns noop implementation of Tracer. -func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { +func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer { return noopTracer{} } @@ -37,7 +37,7 @@ var _ Tracer = noopTracer{} // Start carries forward a non-recording Span, if one is present in the context, otherwise it // creates a no-op Span. -func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { +func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) { span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go index 64a4f1b36..689d220df 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -51,7 +51,7 @@ type Tracer struct{ embedded.Tracer } // If ctx contains a span context, the returned span will also contain that // span context. If the span context in ctx is for a non-recording span, that // span instance will be returned directly. -func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { +func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { span := trace.SpanFromContext(ctx) // If the parent context contains a non-zero span context, that span diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index d49adf671..ee6f4bcb2 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -4,8 +4,6 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( - "bytes" - "encoding/hex" "encoding/json" ) @@ -38,21 +36,47 @@ var ( _ json.Marshaler = nilTraceID ) -// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// IsValid reports whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) + return t != nilTraceID } // MarshalJSON implements a custom marshal function to encode TraceID // as a hex string. func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) + b := [32 + 2]byte{0: '"', 33: '"'} + h := t.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a TraceID. func (t TraceID) String() string { - return hex.EncodeToString(t[:]) + h := t.hexBytes() + return string(h[:]) +} + +// hexBytes returns the hex string representation form of a TraceID. 
+func (t TraceID) hexBytes() [32]byte { + return [32]byte{ + hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf], + hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf], + hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf], + hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf], + hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf], + hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf], + hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf], + hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf], + hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf], + hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf], + hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf], + hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf], + hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf], + hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf], + hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf], + hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf], + } } // SpanID is a unique identity of a span in a trace. @@ -63,21 +87,38 @@ var ( _ json.Marshaler = nilSpanID ) -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// IsValid reports whether the SpanID is valid. A valid SpanID does not consist // of zeros only. func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) + return s != nilSpanID } // MarshalJSON implements a custom marshal function to encode SpanID // as a hex string. func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) + b := [16 + 2]byte{0: '"', 17: '"'} + h := s.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a SpanID. func (s SpanID) String() string { - return hex.EncodeToString(s[:]) + b := s.hexBytes() + return string(b[:]) +} + +func (s SpanID) hexBytes() [16]byte { + return [16]byte{ + hexLU[s[0]>>4], hexLU[s[0]&0xf], + hexLU[s[1]>>4], hexLU[s[1]&0xf], + hexLU[s[2]>>4], hexLU[s[2]&0xf], + hexLU[s[3]>>4], hexLU[s[3]&0xf], + hexLU[s[4]>>4], hexLU[s[4]&0xf], + hexLU[s[5]>>4], hexLU[s[5]&0xf], + hexLU[s[6]>>4], hexLU[s[6]&0xf], + hexLU[s[7]>>4], hexLU[s[7]&0xf], + } } // TraceIDFromHex returns a TraceID from a hex string if it is compliant with @@ -85,65 +126,58 @@ func (s SpanID) String() string { // https://www.w3.org/TR/trace-context/#trace-id // nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} if len(h) != 32 { - return t, errInvalidTraceIDLength + return [16]byte{}, errInvalidTraceIDLength } - - if err := decodeHex(h, t[:]); err != nil { - return t, err + var b [16]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - - if !t.IsValid() { - return t, errNilTraceID + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [16]byte{}, errInvalidHexID + } + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [16]byte{}, errNilTraceID } - return t, nil + return b, nil } // SpanIDFromHex returns a SpanID from a hex string if it is compliant // with the w3c trace-context specification. 
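The encode side is the mirror image of the decode path. The `hexBytes` helpers above index a 16-byte digit table (`hexLU`, presumably added next to `hexRev` elsewhere in this diff) by each byte's high and low nibble, and the new `MarshalJSON` implementations write the surrounding quotes plus the hex digits into one fixed-size array rather than going through `json.Marshal(t.String())`. A hedged sketch of the same shape for a generic 8-byte ID (not the vendored implementation):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// hexDigits has the same shape as the hexLU table the vendored hexBytes
// helpers index into (illustrative constant, not the vendored one).
const hexDigits = "0123456789abcdef"

// ID stands in for an 8-byte identifier such as a SpanID.
type ID [8]byte

// MarshalJSON writes `"` + 16 hex digits + `"` into one fixed-size
// array: no intermediate string and no reflection-based marshalling.
func (id ID) MarshalJSON() ([]byte, error) {
	b := [16 + 2]byte{0: '"', 17: '"'}
	for i, v := range id {
		b[1+2*i] = hexDigits[v>>4]
		b[2+2*i] = hexDigits[v&0xf]
	}
	return b[:], nil
}

func main() {
	out, _ := json.Marshal(ID{0xde, 0xad, 0xbe, 0xef, 0, 0, 0, 1})
	fmt.Println(string(out)) // "deadbeef00000001"
}
```

Because the buffer size is known at compile time, nothing is resized along the way; the fully unrolled vendored version goes one step further and avoids even the loop.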
// See more at https://www.w3.org/TR/trace-context/#parent-id func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} if len(h) != 16 { - return s, errInvalidSpanIDLength - } - - if err := decodeHex(h, s[:]); err != nil { - return s, err + return [8]byte{}, errInvalidSpanIDLength } - - if !s.IsValid() { - return s, errNilSpanID + var b [8]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [8]byte{}, errInvalidHexID } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [8]byte{}, errNilSpanID } - - copy(b, decoded) - return nil + return b, nil } // TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. -// IsSampled returns if the sampling bit is set in the TraceFlags. +// IsSampled reports whether the sampling bit is set in the TraceFlags. func (tf TraceFlags) IsSampled() bool { return tf&FlagsSampled == FlagsSampled } @@ -160,12 +194,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) + b := [2 + 2]byte{0: '"', 3: '"'} + h := tf.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) + h := tf.hexBytes() + return string(h[:]) +} + +func (tf TraceFlags) hexBytes() [2]byte { + return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]} } // SpanContextConfig contains mutable fields usable for constructing @@ -201,13 +243,13 @@ type SpanContext struct { var _ json.Marshaler = SpanContext{} -// IsValid returns if the SpanContext is valid. A valid span context has a +// IsValid reports whether the SpanContext is valid. A valid span context has a // valid TraceID and SpanID. func (sc SpanContext) IsValid() bool { return sc.HasTraceID() && sc.HasSpanID() } -// IsRemote indicates whether the SpanContext represents a remotely-created Span. +// IsRemote reports whether the SpanContext represents a remotely-created Span. func (sc SpanContext) IsRemote() bool { return sc.remote } @@ -228,7 +270,7 @@ func (sc SpanContext) TraceID() TraceID { return sc.traceID } -// HasTraceID checks if the SpanContext has a valid TraceID. +// HasTraceID reports whether the SpanContext has a valid TraceID. func (sc SpanContext) HasTraceID() bool { return sc.traceID.IsValid() } @@ -249,7 +291,7 @@ func (sc SpanContext) SpanID() SpanID { return sc.spanID } -// HasSpanID checks if the SpanContext has a valid SpanID. +// HasSpanID reports whether the SpanContext has a valid SpanID. 
func (sc SpanContext) HasSpanID() bool { return sc.spanID.IsValid() } @@ -270,7 +312,7 @@ func (sc SpanContext) TraceFlags() TraceFlags { return sc.traceFlags } -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags. func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } @@ -302,7 +344,7 @@ func (sc SpanContext) WithTraceState(state TraceState) SpanContext { } } -// Equal is a predicate that determines whether two SpanContext values are equal. +// Equal reports whether two SpanContext values are equal. func (sc SpanContext) Equal(other SpanContext) bool { return sc.traceID == other.traceID && sc.spanID == other.spanID && diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index dc5e34cad..073adae2f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -80,7 +80,7 @@ func checkKeyRemain(key string) bool { // // param n is remain part length, should be 255 in simple-key or 13 in system-id. func checkKeyPart(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } first := key[0] // key's first char @@ -102,7 +102,7 @@ func isAlphaNum(c byte) bool { // // param n is remain part length, should be 240 exactly. func checkKeyTenant(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) @@ -191,7 +191,7 @@ func ParseTraceState(ts string) (TraceState, error) { for ts != "" { var memberStr string memberStr, ts, _ = strings.Cut(ts, listDelimiters) - if len(memberStr) == 0 { + if memberStr == "" { continue } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 7afe92b59..bcaa5aa53 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
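One small idiom worth calling out from the `tracestate.go` hunk above: `ParseTraceState` walks the member list with `strings.Cut` rather than allocating a slice via `strings.Split`, peeling one member off the front per iteration and skipping empties. A sketch of that loop shape (assuming `listDelimiters` is the comma separator, as the surrounding code suggests):

```go
package main

import (
	"fmt"
	"strings"
)

// members iterates a comma-separated list with the same loop shape
// ParseTraceState uses: each pass peels one member off the front, and
// empty members (from ",," or a trailing ",") are skipped.
func members(list string) []string {
	var out []string
	for list != "" {
		var m string
		m, list, _ = strings.Cut(list, ",")
		if m == "" {
			continue
		}
		out = append(out, m)
	}
	return out
}

func main() {
	fmt.Println(members("a=1,,b=2,")) // [a=1 b=2]
}
```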
func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 9d4742a17..07145e254 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.37.0 + version: v1.38.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.59.0 + version: v0.60.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.13.0 + version: v0.14.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,7 +36,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.12 + version: v0.0.13 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml deleted file mode 100644 index 6d4d1be7b..000000000 --- a/vendor/go.uber.org/multierr/.codecov.yml +++ /dev/null @@ -1,15 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore deleted file mode 100644 index b9a05e3da..000000000 --- a/vendor/go.uber.org/multierr/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/vendor -cover.html -cover.out -/bin diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md deleted file mode 100644 index f8177b978..000000000 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ /dev/null @@ -1,95 +0,0 @@ -Releases -======== - -v1.11.0 (2023-03-28) -==================== -- `Errors` now supports any error that implements multiple-error - interface. -- Add `Every` function to allow checking if all errors in the chain - satisfies `errors.Is` against the target error. - -v1.10.0 (2023-03-08) -==================== - -- Comply with Go 1.20's multiple-error interface. -- Drop Go 1.18 support. - Per the support policy, only Go 1.19 and 1.20 are supported now. -- Drop all non-test external dependencies. - -v1.9.0 (2022-12-12) -=================== - -- Add `AppendFunc` that allow passsing functions to similar to - `AppendInvoke`. - -- Bump up yaml.v3 dependency to 3.0.1. - -v1.8.0 (2022-02-28) -=================== - -- `Combine`: perform zero allocations when there are no errors. - - -v1.7.0 (2021-05-06) -=================== - -- Add `AppendInvoke` to append into errors from `defer` blocks. - - -v1.6.0 (2020-09-14) -=================== - -- Actually drop library dependency on development-time tooling. - - -v1.5.0 (2020-02-24) -=================== - -- Drop library dependency on development-time tooling. 
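The hunks that follow delete the vendored `go.uber.org/multierr` tree, which presumably drops out of the module graph entirely with this dependency bump. For readers auditing the removal: since Go 1.20 the standard library's `errors.Join` covers multierr's core combine-and-unwrap behavior, so code that previously reached for `multierr.Combine`/`multierr.Append` often needs no third-party package at all. A short stdlib-only sketch:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	errRead := errors.New("close reader")
	errWrite := errors.New("close writer")

	// Like multierr.Combine: nil arguments are skipped, and an all-nil
	// argument list yields a nil error.
	err := errors.Join(errRead, nil, errWrite)
	fmt.Println(err)                   // prints both messages, newline-separated
	fmt.Println(errors.Join(nil, nil)) // <nil>

	// The joined error implements Unwrap() []error, so errors.Is/As
	// traverse it just as they traverse a multierr error.
	fmt.Println(errors.Is(err, errWrite)) // true
}
```

`Unwrap() []error` is exactly the Go 1.20 interface that the deleted `error_post_go120.go` file further below adapts multierr's own error type to.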
- - -v1.4.0 (2019-11-04) -=================== - -- Add `AppendInto` function to more ergonomically build errors inside a - loop. - - -v1.3.0 (2019-10-29) -=================== - -- Switch to Go modules. - - -v1.2.0 (2019-09-26) -=================== - -- Support extracting and matching against wrapped errors with `errors.As` - and `errors.Is`. - - -v1.1.0 (2017-06-30) -=================== - -- Added an `Errors(error) []error` function to extract the underlying list of - errors for a multierr error. - - -v1.0.0 (2017-05-31) -=================== - -No changes since v0.2.0. This release is committing to making no breaking -changes to the current API in the 1.X series. - - -v0.2.0 (2017-04-11) -=================== - -- Repeatedly appending to the same error is now faster due to fewer - allocations. - - -v0.1.0 (2017-31-03) -=================== - -- Initial release diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile deleted file mode 100644 index dcb6fe723..000000000 --- a/vendor/go.uber.org/multierr/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -# Directory to put `go install`ed binaries in. -export GOBIN ?= $(shell pwd)/bin - -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -.PHONY: build -build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) - -.PHONY: golint -golint: - @cd tools && go install golang.org/x/lint/golint - @$(GOBIN)/golint ./... - -.PHONY: staticcheck -staticcheck: - @cd tools && go install honnef.co/go/tools/cmd/staticcheck - @$(GOBIN)/staticcheck ./... - -.PHONY: lint -lint: gofmt golint staticcheck - -.PHONY: cover -cover: - go test -race -coverprofile=cover.out -coverpkg=./... -v ./... - go tool cover -html=cover.out -o cover.html diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md deleted file mode 100644 index 5ab6ac40f..000000000 --- a/vendor/go.uber.org/multierr/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -`multierr` allows combining one or more Go `error`s together. - -## Features - -- **Idiomatic**: - multierr follows best practices in Go, and keeps your code idiomatic. - - It keeps the underlying error type hidden, - allowing you to deal in `error` values exclusively. - - It provides APIs to safely append into an error from a `defer` statement. -- **Performant**: - multierr is optimized for performance: - - It avoids allocations where possible. - - It utilizes slice resizing semantics to optimize common cases - like appending into the same error object from a loop. -- **Interoperable**: - multierr interoperates with the Go standard library's error APIs seamlessly: - - The `errors.Is` and `errors.As` functions *just work*. -- **Lightweight**: - multierr comes with virtually no dependencies. - -## Installation - -```bash -go get -u go.uber.org/multierr@latest -``` - -## Status - -Stable: No breaking changes will be made before 2.0. - -------------------------------------------------------------------------------- - -Released under the [MIT License]. 
- -[MIT License]: LICENSE.txt -[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr -[doc]: https://pkg.go.dev/go.uber.org/multierr -[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg -[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml -[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go deleted file mode 100644 index 3a828b2df..000000000 --- a/vendor/go.uber.org/multierr/error.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright (c) 2017-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package multierr allows combining one or more errors together. -// -// # Overview -// -// Errors can be combined with the use of the Combine function. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) -// -// If only two errors are being combined, the Append function may be used -// instead. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// The underlying list of errors for a returned error object may be retrieved -// with the Errors function. -// -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:", errors) -// } -// -// # Appending from a loop -// -// You sometimes need to append into an error from a loop. -// -// var err error -// for _, item := range items { -// err = multierr.Append(err, process(item)) -// } -// -// Cases like this may require knowledge of whether an individual instance -// failed. This usually requires introduction of a new variable. -// -// var err error -// for _, item := range items { -// if perr := process(item); perr != nil { -// log.Warn("skipping item", item) -// err = multierr.Append(err, perr) -// } -// } -// -// multierr includes AppendInto to simplify cases like this. -// -// var err error -// for _, item := range items { -// if multierr.AppendInto(&err, process(item)) { -// log.Warn("skipping item", item) -// } -// } -// -// This will append the error into the err variable, and return true if that -// individual error was non-nil. -// -// See [AppendInto] for more information. -// -// # Deferred Functions -// -// Go makes it possible to modify the return value of a function in a defer -// block if the function was using named returns. 
This makes it possible to -// record resource cleanup failures from deferred blocks. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } -// -// multierr provides the Invoker type and AppendInvoke function to make cases -// like the above simpler and obviate the need for a closure. The following is -// roughly equivalent to the example above. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(conn)) -// // ... -// } -// -// See [AppendInvoke] and [Invoker] for more information. -// -// NOTE: If you're modifying an error from inside a defer, you MUST use a named -// return value for that function. -// -// # Advanced Usage -// -// Errors returned by Combine and Append MAY implement the following -// interface. -// -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } -// -// Note that if you need access to list of errors behind a multierr error, you -// should prefer using the Errors function. That said, if you need cheap -// read-only access to the underlying errors slice, you can attempt to cast -// the error to this interface. You MUST handle the failure case gracefully -// because errors returned by Combine and Append are not guaranteed to -// implement this interface. -// -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } -package multierr // import "go.uber.org/multierr" - -import ( - "bytes" - "errors" - "fmt" - "io" - "strings" - "sync" - "sync/atomic" -) - -var ( - // Separator for single-line error messages. - _singlelineSeparator = []byte("; ") - - // Prefix for multi-line messages - _multilinePrefix = []byte("the following errors occurred:") - - // Prefix for the first and following lines of an item in a list of - // multi-line error messages. - // - // For example, if a single item is: - // - // foo - // bar - // - // It will become, - // - // - foo - // bar - _multilineSeparator = []byte("\n - ") - _multilineIndent = []byte(" ") -) - -// _bufferPool is a pool of bytes.Buffers. -var _bufferPool = sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, -} - -type errorGroup interface { - Errors() []error -} - -// Errors returns a slice containing zero or more errors that the supplied -// error is composed of. If the error is nil, a nil slice is returned. -// -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) -// -// If the error is not composed of other errors, the returned slice contains -// just the error that was passed in. -// -// Callers of this function are free to modify the returned slice. -func Errors(err error) []error { - return extractErrors(err) -} - -// multiError is an error that holds one or more errors. -// -// An instance of this is guaranteed to be non-empty and flattened. That is, -// none of the errors inside multiError are other multiErrors. -// -// multiError formats to a semi-colon delimited list of error messages with -// %v and with a more readable multi-line format with %+v. -type multiError struct { - copyNeeded atomic.Bool - errors []error -} - -// Errors returns the list of underlying errors. 
-// -// This slice MUST NOT be modified. -func (merr *multiError) Errors() []error { - if merr == nil { - return nil - } - return merr.errors -} - -func (merr *multiError) Error() string { - if merr == nil { - return "" - } - - buff := _bufferPool.Get().(*bytes.Buffer) - buff.Reset() - - merr.writeSingleline(buff) - - result := buff.String() - _bufferPool.Put(buff) - return result -} - -// Every compares every error in the given err against the given target error -// using [errors.Is], and returns true only if every comparison returned true. -func Every(err error, target error) bool { - for _, e := range extractErrors(err) { - if !errors.Is(e, target) { - return false - } - } - return true -} - -func (merr *multiError) Format(f fmt.State, c rune) { - if c == 'v' && f.Flag('+') { - merr.writeMultiline(f) - } else { - merr.writeSingleline(f) - } -} - -func (merr *multiError) writeSingleline(w io.Writer) { - first := true - for _, item := range merr.errors { - if first { - first = false - } else { - w.Write(_singlelineSeparator) - } - io.WriteString(w, item.Error()) - } -} - -func (merr *multiError) writeMultiline(w io.Writer) { - w.Write(_multilinePrefix) - for _, item := range merr.errors { - w.Write(_multilineSeparator) - writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) - } -} - -// Writes s to the writer with the given prefix added before each line after -// the first. -func writePrefixLine(w io.Writer, prefix []byte, s string) { - first := true - for len(s) > 0 { - if first { - first = false - } else { - w.Write(prefix) - } - - idx := strings.IndexByte(s, '\n') - if idx < 0 { - idx = len(s) - 1 - } - - io.WriteString(w, s[:idx+1]) - s = s[idx+1:] - } -} - -type inspectResult struct { - // Number of top-level non-nil errors - Count int - - // Total number of errors including multiErrors - Capacity int - - // Index of the first non-nil error in the list. Value is meaningless if - // Count is zero. - FirstErrorIdx int - - // Whether the list contains at least one multiError - ContainsMultiError bool -} - -// Inspects the given slice of errors so that we can efficiently allocate -// space for it. -func inspect(errors []error) (res inspectResult) { - first := true - for i, err := range errors { - if err == nil { - continue - } - - res.Count++ - if first { - first = false - res.FirstErrorIdx = i - } - - if merr, ok := err.(*multiError); ok { - res.Capacity += len(merr.errors) - res.ContainsMultiError = true - } else { - res.Capacity++ - } - } - return -} - -// fromSlice converts the given list of errors into a single error. -func fromSlice(errors []error) error { - // Don't pay to inspect small slices. - switch len(errors) { - case 0: - return nil - case 1: - return errors[0] - } - - res := inspect(errors) - switch res.Count { - case 0: - return nil - case 1: - // only one non-nil entry - return errors[res.FirstErrorIdx] - case len(errors): - if !res.ContainsMultiError { - // Error list is flat. Make a copy of it - // Otherwise "errors" escapes to the heap - // unconditionally for all other cases. - // This lets us optimize for the "no errors" case. - out := append(([]error)(nil), errors...) - return &multiError{errors: out} - } - } - - nonNilErrs := make([]error, 0, res.Capacity) - for _, err := range errors[res.FirstErrorIdx:] { - if err == nil { - continue - } - - if nested, ok := err.(*multiError); ok { - nonNilErrs = append(nonNilErrs, nested.errors...) 
- } else { - nonNilErrs = append(nonNilErrs, err) - } - } - - return &multiError{errors: nonNilErrs} -} - -// Combine combines the passed errors into a single error. -// -// If zero arguments were passed or if all items are nil, a nil error is -// returned. -// -// Combine(nil, nil) // == nil -// -// If only a single error was passed, it is returned as-is. -// -// Combine(err) // == err -// -// Combine skips over nil arguments so this function may be used to combine -// together errors from operations that fail independently of each other. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) -// -// If any of the passed errors is a multierr error, it will be flattened along -// with the other errors. -// -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) -// -// The returned error formats into a readable multi-line error message if -// formatted with %+v. -// -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) -func Combine(errors ...error) error { - return fromSlice(errors) -} - -// Append appends the given errors together. Either value may be nil. -// -// This function is a specialization of Combine for the common case where -// there are only two errors. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// The following pattern may also be used to record failure of deferred -// operations without losing information about the original error. -// -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() -// -// Note that the variable MUST be a named return to append an error to it from -// the defer statement. See also [AppendInvoke]. -func Append(left error, right error) error { - switch { - case left == nil: - return right - case right == nil: - return left - } - - if _, ok := right.(*multiError); !ok { - if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { - // Common case where the error on the left is constantly being - // appended to. - errs := append(l.errors, right) - return &multiError{errors: errs} - } else if !ok { - // Both errors are single errors. - return &multiError{errors: []error{left, right}} - } - } - - // Either right or both, left and right, are multiErrors. Rely on usual - // expensive logic. - errors := [2]error{left, right} - return fromSlice(errors[0:]) -} - -// AppendInto appends an error into the destination of an error pointer and -// returns whether the error being appended was non-nil. -// -// var err error -// multierr.AppendInto(&err, r.Close()) -// multierr.AppendInto(&err, w.Close()) -// -// The above is equivalent to, -// -// err := multierr.Append(r.Close(), w.Close()) -// -// As AppendInto reports whether the provided error was non-nil, it may be -// used to build a multierr error in a loop more ergonomically. For example: -// -// var err error -// for line := range lines { -// var item Item -// if multierr.AppendInto(&err, parse(line, &item)) { -// continue -// } -// items = append(items, item) -// } -// -// Compare this with a version that relies solely on Append: -// -// var err error -// for line := range lines { -// var item Item -// if parseErr := parse(line, &item); parseErr != nil { -// err = multierr.Append(err, parseErr) -// continue -// } -// items = append(items, item) -// } -func AppendInto(into *error, err error) (errored bool) { - if into == nil { - // We panic if 'into' is nil. 
This is not documented above - // because suggesting that the pointer must be non-nil may - // confuse users into thinking that the error that it points - // to must be non-nil. - panic("misuse of multierr.AppendInto: into pointer must not be nil") - } - - if err == nil { - return false - } - *into = Append(*into, err) - return true -} - -// Invoker is an operation that may fail with an error. Use it with -// AppendInvoke to append the result of calling the function into an error. -// This allows you to conveniently defer capture of failing operations. -// -// See also, [Close] and [Invoke]. -type Invoker interface { - Invoke() error -} - -// Invoke wraps a function which may fail with an error to match the Invoker -// interface. Use it to supply functions matching this signature to -// AppendInvoke. -// -// For example, -// -// func processReader(r io.Reader) (err error) { -// scanner := bufio.NewScanner(r) -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// for scanner.Scan() { -// // ... -// } -// // ... -// } -// -// In this example, the following line will construct the Invoker right away, -// but defer the invocation of scanner.Err() until the function returns. -// -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// -// Note that the error you're appending to from the defer statement MUST be a -// named return. -type Invoke func() error - -// Invoke calls the supplied function and returns its result. -func (i Invoke) Invoke() error { return i() } - -// Close builds an Invoker that closes the provided io.Closer. Use it with -// AppendInvoke to close io.Closers and append their results into an error. -// -// For example, -// -// func processFile(path string) (err error) { -// f, err := os.Open(path) -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// return processReader(f) -// } -// -// In this example, multierr.Close will construct the Invoker right away, but -// defer the invocation of f.Close until the function returns. -// -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// -// Note that the error you're appending to from the defer statement MUST be a -// named return. -func Close(closer io.Closer) Invoker { - return Invoke(closer.Close) -} - -// AppendInvoke appends the result of calling the given Invoker into the -// provided error pointer. Use it with named returns to safely defer -// invocation of fallible operations until a function returns, and capture the -// resulting errors. -// -// func doSomething(...) (err error) { -// // ... -// f, err := openFile(..) -// if err != nil { -// return err -// } -// -// // multierr will call f.Close() when this function returns and -// // if the operation fails, its append its error into the -// // returned error. -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// -// scanner := bufio.NewScanner(f) -// // Similarly, this scheduled scanner.Err to be called and -// // inspected when the function returns and append its error -// // into the returned error. -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// -// // ... -// } -// -// NOTE: If used with a defer, the error variable MUST be a named return. -// -// Without defer, AppendInvoke behaves exactly like AppendInto. -// -// err := // ... -// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) -// -// // ...is roughly equivalent to... -// -// err := // ... 
-// multierr.AppendInto(&err, foo()) -// -// The advantage of the indirection introduced by Invoker is to make it easy -// to defer the invocation of a function. Without this indirection, the -// invoked function will be evaluated at the time of the defer block rather -// than when the function returns. -// -// // BAD: This is likely not what the caller intended. This will evaluate -// // foo() right away and append its result into the error when the -// // function returns. -// defer multierr.AppendInto(&err, foo()) -// -// // GOOD: This will defer invocation of foo unutil the function returns. -// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) -// -// multierr provides a few Invoker implementations out of the box for -// convenience. See [Invoker] for more information. -func AppendInvoke(into *error, invoker Invoker) { - AppendInto(into, invoker.Invoke()) -} - -// AppendFunc is a shorthand for [AppendInvoke]. -// It allows using function or method value directly -// without having to wrap it into an [Invoker] interface. -// -// func doSomething(...) (err error) { -// w, err := startWorker(...) -// if err != nil { -// return err -// } -// -// // multierr will call w.Stop() when this function returns and -// // if the operation fails, it appends its error into the -// // returned error. -// defer multierr.AppendFunc(&err, w.Stop) -// } -func AppendFunc(into *error, fn func() error) { - AppendInvoke(into, Invoke(fn)) -} diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go deleted file mode 100644 index a173f9c25..000000000 --- a/vendor/go.uber.org/multierr/error_post_go120.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2017-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:build go1.20 -// +build go1.20 - -package multierr - -// Unwrap returns a list of errors wrapped by this multierr. -func (merr *multiError) Unwrap() []error { - return merr.Errors() -} - -type multipleErrors interface { - Unwrap() []error -} - -func extractErrors(err error) []error { - if err == nil { - return nil - } - - // check if the given err is an Unwrapable error that - // implements multipleErrors interface. - eg, ok := err.(multipleErrors) - if !ok { - return []error{err} - } - - return append(([]error)(nil), eg.Unwrap()...) 
-} diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go deleted file mode 100644 index 93872a3fc..000000000 --- a/vendor/go.uber.org/multierr/error_pre_go120.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2017-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:build !go1.20 -// +build !go1.20 - -package multierr - -import "errors" - -// Versions of Go before 1.20 did not support the Unwrap() []error method. -// This provides a similar behavior by implementing the Is(..) and As(..) -// methods. -// See the errors.Join proposal for details: -// https://github.com/golang/go/issues/53435 - -// As attempts to find the first error in the error list that matches the type -// of the value that target points to. -// -// This function allows errors.As to traverse the values stored on the -// multierr error. -func (merr *multiError) As(target interface{}) bool { - for _, err := range merr.Errors() { - if errors.As(err, target) { - return true - } - } - return false -} - -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored on the -// multierr error. -func (merr *multiError) Is(target error) bool { - for _, err := range merr.Errors() { - if errors.Is(err, target) { - return true - } - } - return false -} - -func extractErrors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - return append(([]error)(nil), eg.Errors()...) -} diff --git a/vendor/go.yaml.in/yaml/v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE new file mode 100644 index 000000000..2683e4bb1 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. 
+ +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/go.yaml.in/yaml/v3/NOTICE b/vendor/go.yaml.in/yaml/v3/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 000000000..15a85a635 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. 
+ +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). + + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! 
+b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. diff --git a/vendor/go.yaml.in/yaml/v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go new file mode 100644 index 000000000..05fd305da --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. 
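Stepping back to `yaml_insert_token` at the top of the `apic.go` file above: before growing `parser.tokens`, it slides the unconsumed tail of the token queue back to index 0 whenever the backing array is full and some head elements have already been consumed. That keeps a slice-backed FIFO from reallocating on every wrap. A generic sketch of the same move (illustrative, not the vendored code):

```go
package main

import "fmt"

// enqueue appends v to a slice-backed FIFO whose consumed prefix is
// tracked by head. When the backing array is full but items have been
// taken from the front, it compacts the live tail to index 0 first,
// the same move yaml_insert_token makes on parser.tokens.
func enqueue[T any](q []T, head int, v T) ([]T, int) {
	if head > 0 && len(q) == cap(q) {
		n := copy(q, q[head:]) // overlapping copy is well-defined in Go
		q = q[:n]
		head = 0
	}
	return append(q, v), head
}

func main() {
	q := make([]int, 0, 4)
	head := 0
	for i := 1; i <= 4; i++ {
		q, head = enqueue(q, head, i)
	}
	head = 2                      // pretend two items were dequeued
	q, head = enqueue(q, head, 5) // compacts [3 4] to the front, then appends
	fmt.Println(q[head:], cap(q)) // [3 4 5] 4
}
```

The built-in `copy` is safe even when source and destination overlap, which is what makes the in-place compaction a one-liner.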
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. 
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 000000000..02e2b17bf --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
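+//
+// Rough usage sketch (the exported decoding entry points elsewhere in
+// this package drive the parser along these lines):
+//
+//	p := newParser(input)
+//	defer p.destroy()
+//	node := p.parse() // root *Node of the next document, or nil at stream end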
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *Node
+	anchors  map[string]*Node
+	doneInit bool
+	textless bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.anchors = make(map[string]*Node)
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	// It's a curious choice of the underlying API to generally return a
+	// positive result on success, but in this case to return true in an
+	// error scenario. This was the source of bugs in the past (issue #666).
+	if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+		// Scanner errors don't increment the line before returning the error.
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't increment the line before returning the error.
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+	if anchor != nil {
+		n.Anchor = string(anchor)
+		p.anchors[n.Anchor] = n
+	}
+}
+
+func (p *parser) parse() *Node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
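+		// For example, unmarshalling a zero-length input lands here
+		// immediately: the stream carries no document at all, so the
+		// caller gets a nil node and has nothing to decode.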
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
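+				// Roughly, in a document such as
+				//
+				//	a:
+				//	  b: 1
+				//	# comment for the "a" entry
+				//	c: 2
+				//
+				// the comment can first arrive attached to the next
+				// key ("c") and is reassigned to the prior entry here.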
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
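+//
+// For example, decoding into a *T where *T implements Unmarshaler will
+// allocate the T if needed, call UnmarshalYAML on it, and report
+// unmarshaled=true so the caller skips the regular decoding paths.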
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
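+		// For example, "a: &x [*x]" makes the anchored sequence contain
+		// its own alias; expanding it would recurse forever, so it is
+		// rejected here instead.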
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
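+		// For example, a bare YAML 5 decoded into a time.Duration used
+		// to mean 5ns in v2; now only strings such as "5s" are accepted
+		// for durations, via time.ParseDuration below.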
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
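+		// For example, decoding "[1, two]" into an interface{} value
+		// produces []interface{}{1, "two"}.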
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := 
settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/go.yaml.in/yaml/v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go new file mode 100644 index 000000000..ab4e03ba7 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -0,0 +1,2054 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
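+//
+// For example, with best_indent == 2 a block collection nested inside
+// another moves the indent 0 -> 2 -> 4, while compact_seq subtracts 2 so
+// that the "- " indicator itself supplies the sequence item's indentation.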
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent) + if compact_seq { + // The value compact_seq passed in is almost always set to `false` when this function is called, + // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we + // are increasing the indent to account for sequence nodes, which will be correct because we need to + // subtract 2 to account for the - at the beginning of the sequence node. + emitter.indent = emitter.indent - 2 + } + } + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: 
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
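+//
+// Flow sequences render inline, e.g. "[1, 2, 3]"; the first and trail
+// flags control where the ',' separators are written.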
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
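+//
+// Block sequence items are written one per line behind a "- " indicator,
+// for example:
+//
+//	- 1
+//	- 2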
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// emitter.mapping_context tells us if we are currently in a mapping context.
+		// emitter.column tells us the current column in the YAML output; column 0 is the first character of a line.
+		// emitter.indention tells us if the last character written was an indentation character.
+		// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
+		// So, `seq` means that we are in a mapping context, and we are either at the first character of a line or
+		// the last character was not an indentation character, and we consider '- ' part of the indentation
+		// for sequence elements.
+		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+			emitter.compact_sequence_indent
+		if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		// scanner associates line comments with the value. Either way,
+		// save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. 
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
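+//
+// Editor's note (illustrative, hedged): when this check fails, e.g. for a
+// multiline scalar key or a key whose rendered length exceeds 128 bytes,
+// the mapping emitters above fall back to the explicit "? key / : value"
+// form instead of the usual "key: value" layout.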
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. 
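+		// Editor's note (illustrative; tag:example.com,2000:app/foo is a
+		// hypothetical tag): a tag that matches no registered handle is
+		// written in verbatim form, e.g. !<tag:example.com,2000:app/foo>.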
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Write a head comment. +func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool { + if len(emitter.line_comment) == 0 { + // The next 3 lines are needed to resolve an issue with leading newlines + // See https://github.com/go-yaml/yaml/issues/755 + // When linebreak is set to true, put_break will be called and will add + // the needed newline. + if linebreak && !put_break(emitter) { + return false + } + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. +func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. 
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go new file mode 100644 index 000000000..de9e72a3e --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
+// parsing.
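+//
+// Editor's sketch (hedged; assumes only this package's public Marshal API,
+// imported as yaml by a client):
+//
+//	out, _ := yaml.Marshal(map[string]string{"answer": "yes"})
+//	// string(out) == "answer: \"yes\"\n"; the quotes keep a YAML 1.1
+//	// parser from reading the value back as a boolean.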
+func isOldBool(s string) (result bool) {
+	switch s {
+	case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+		"n", "N", "no", "No", "NO", "off", "Off", "OFF":
+		return true
+	default:
+		return false
+	}
+}
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == binaryTag {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = binaryTag
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		if e.flow {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		} else {
+			style = yaml_LITERAL_SCALAR_STYLE
+		}
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+	implicit := tag == ""
+	if !implicit {
+		tag = longTag(tag)
+	}
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.event.head_comment = head
+	e.event.line_comment = line
+	e.event.foot_comment = foot
+	e.event.tail_comment = tail
+	e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+	e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+	// Zero nodes behave as nil.
+	if node.Kind == 0 && node.IsZero() {
+		e.nilv()
+		return
+	}
+
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ := resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			var rtag string
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value is streamed. The last tail is
+		// processed with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			if foot != "" {
+				kopy := *k
+				kopy.FootComment = ""
+				k = &kopy
+			}
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if stag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if stag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", stag)
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/go.yaml.in/yaml/v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go new file mode 100644 index 000000000..25fe82363 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/parserc.go @@ -0,0 +1,1274 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
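+//
+// Editor's note (illustrative, hedged): parser.state is the current state of
+// this machine and parser.states acts as a return stack; compound states such
+// as yaml_PARSE_DOCUMENT_START_STATE push the state to resume (for example
+// yaml_PARSE_DOCUMENT_END_STATE) before descending into nested content, and
+// the finished construct pops it back into parser.state.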
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
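+		// Only STREAM-END remains at this point: no further documents follow,
+		// so emit the matching event and move the parser to its final state.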
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// *********** +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// +// block_node ::= ALIAS +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// +// flow_node ::= ALIAS +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// +// ************************* +// +// block_content ::= block_collection | flow_collection | SCALAR +// +// ****** +// +// flow_content ::= flow_collection | SCALAR +// +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// +// ******************** *********** * ********* +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// *** * +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// ***** * +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// - *** * +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - ***** * +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
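+//
+// [Illustrative note, not upstream text] Given a hypothetical prologue such as
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2002:
+//	---
+//
+// this function records the handle {"!e!", "tag:example.com,2002:"} and then
+// appends the default "!" and "!!" directives above, so a node later tagged
+// "!e!widget" resolves to "tag:example.com,2002:widget" during node parsing.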
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go new file mode 100644 index 000000000..56af24536 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
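+//
+// [Illustrative note, not upstream text] On success the invariant is that
+// parser.unread >= length and parser.buffer[parser.buffer_pos:] holds at
+// least `length` characters re-encoded as UTF-8; at EOF the tail is padded
+// with NUL bytes so that callers indexing up to `length` stay in bounds.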
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. 
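+			// For example (illustrative): if only the first byte 0xE2 of the
+			// three-byte sequence for U+20AC (0xE2 0x82 0xAC) is available,
+			// width (3) exceeds raw_unread (1); unless we are at EOF, stop
+			// decoding and fetch more raw input first.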
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
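+					// For example (illustrative): U+1D11E is encoded as the
+					// surrogate pair D834 DD1E, so here value is 0xD834 and
+					// value2 must fall in 0xDC00-0xDFFF (0xDD1E); anything
+					// else is a malformed pair.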
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go new file mode 100644 index 000000000..64ae88805 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. 
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/go.yaml.in/yaml/v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go
new file mode 100644
index 000000000..30b1f0892
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or,
+// an LL(1) parser, as it is usually called).
+//
+// There are actually two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. 
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
+ if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
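+		// (In folded scalars, a lone line break between two non-blank
+		// lines has already been replaced by a single space above.)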
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
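+		// For example, in a single-quoted scalar the sequence '' stands
+		// for a literal ', and in a double-quoted scalar the escape \n
+		// decodes to a line feed; both cases are handled below.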
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
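+			// For example, the plain scalar "a\n b" folds to "a b",
+			// while "a\n\n b" keeps a single line break as "a\nb".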
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
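+	// A simple key may only follow when the scalar ended at a line
+	// break, i.e. when leading_blanks is set.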
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go new file mode 100644 index 000000000..9210ece7e --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
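+// It panics when the kind is not a number or bool.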
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/go.yaml.in/yaml/v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go new file mode 100644 index 000000000..266d0b092 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go new file mode 100644 index 000000000..0b101cd20 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yaml.go @@ -0,0 +1,703 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+//
+// Source code and other details for the project are available at GitHub:
+//
+//	https://github.com/yaml/go-yaml
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	var t T
+//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
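+//
+// For example, a minimal sketch:
+//
+//	var cfg struct{ Name string }
+//	dec := yaml.NewDecoder(strings.NewReader("name: demo"))
+//	err := dec.Decode(&cfg) // cfg.Name == "demo"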
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//	`(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//	omitempty    Only include the field if it's not set to the zero
+//	             value for the type or to empty slices or maps.
+//	             Zero valued structs will be omitted if all their public
+//	             fields are zero, unless they implement an IsZero
+//	             method (see the IsZeroer interface type), in which
+//	             case the field will be excluded if IsZero returns true.
+//
+//	flow         Marshal using a flow style (useful for structs,
+//	             sequences and maps).
+//
+//	inline       Inline the field, which must be a struct or a map,
+//	             causing all of its fields or keys to be processed as if
+//	             they were part of the outer struct. For maps, keys must
+//	             not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
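+//
+// For example, a minimal sketch (assuming the caller imports "bytes"):
+//
+//	var buf bytes.Buffer
+//	enc := yaml.NewEncoder(&buf)
+//	err := enc.Encode(map[string]string{"a": "b"})
+//	err = enc.Close() // buf now holds "a: b\n"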
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//	var person struct {
+//		Name    string
+//		Address yaml.Node
+//	}
+//	err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//	var person Node
+//	err := yaml.Unmarshal(data, &person)
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line the node is in.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/go.yaml.in/yaml/v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go new file mode 100644 index 000000000..f59aa40f6 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -0,0 +1,811 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+	yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT // A STREAM-START event.
+	yaml_STREAM_END_EVENT   // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// +// yaml_parser_set_input(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. 
+} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. 
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// +// yaml_emitter_set_output(). +// +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + compact_sequence_indent bool // Is '- ' is considered part of the indentation for sequence elements? + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. 
+ tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + key_line_comment []byte + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go new file mode 100644 index 000000000..dea1ba961 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go @@ -0,0 +1,198 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. 
+func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. 
+func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 764221562..27c6e653d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -14,7 +14,7 @@ github.com/bytecodealliance/wasmtime-go/v3/build/linux-x86_64 github.com/bytecodealliance/wasmtime-go/v3/build/macos-aarch64 github.com/bytecodealliance/wasmtime-go/v3/build/macos-x86_64 github.com/bytecodealliance/wasmtime-go/v3/build/windows-x86_64 -# github.com/cenkalti/backoff/v5 v5.0.2 +# github.com/cenkalti/backoff/v5 v5.0.3 ## explicit; go 1.23 github.com/cenkalti/backoff/v5 # github.com/cespare/xxhash/v2 v2.3.0 @@ -120,7 +120,7 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings -# github.com/goccy/go-json v0.10.3 +# github.com/goccy/go-json v0.10.5 ## explicit; go 1.19 github.com/goccy/go-json github.com/goccy/go-json/internal/decoder @@ -143,11 +143,25 @@ github.com/google/flatbuffers/go # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 ## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities +# github.com/hashicorp/golang-lru/v2 v2.0.7 +## explicit; go 1.18 +github.com/hashicorp/golang-lru/v2 +github.com/hashicorp/golang-lru/v2/internal +github.com/hashicorp/golang-lru/v2/simplelru +# github.com/huandu/go-clone v1.7.3 +## explicit; go 1.13 +github.com/huandu/go-clone +# github.com/huandu/go-sqlbuilder v1.37.0 +## explicit; go 1.18 +github.com/huandu/go-sqlbuilder +# github.com/huandu/xstrings v1.4.0 +## explicit; go 1.12 
+github.com/huandu/xstrings # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap @@ -167,17 +181,24 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/lestrrat-go/blackmagic v1.0.4 ## explicit; go 1.23 github.com/lestrrat-go/blackmagic +# github.com/lestrrat-go/dsig v1.0.0 +## explicit; go 1.23.0 +github.com/lestrrat-go/dsig +github.com/lestrrat-go/dsig/internal/ecutil +# github.com/lestrrat-go/dsig-secp256k1 v1.0.0 +## explicit; go 1.23.0 +github.com/lestrrat-go/dsig-secp256k1 # github.com/lestrrat-go/httpcc v1.0.1 ## explicit; go 1.16 github.com/lestrrat-go/httpcc -# github.com/lestrrat-go/httprc/v3 v3.0.0 -## explicit; go 1.23 +# github.com/lestrrat-go/httprc/v3 v3.0.1 +## explicit; go 1.23.0 github.com/lestrrat-go/httprc/v3 github.com/lestrrat-go/httprc/v3/errsink github.com/lestrrat-go/httprc/v3/proxysink github.com/lestrrat-go/httprc/v3/tracesink -# github.com/lestrrat-go/jwx/v3 v3.0.10 -## explicit; go 1.23.0 +# github.com/lestrrat-go/jwx/v3 v3.0.11 +## explicit; go 1.24.4 github.com/lestrrat-go/jwx/v3 github.com/lestrrat-go/jwx/v3/cert github.com/lestrrat-go/jwx/v3/internal/base64 @@ -224,7 +245,7 @@ github.com/munnerz/goautoneg # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/open-policy-agent/opa v1.8.0 +# github.com/open-policy-agent/opa v1.9.0 ## explicit; go 1.24.6 github.com/open-policy-agent/opa/capabilities github.com/open-policy-agent/opa/cmd @@ -234,6 +255,7 @@ github.com/open-policy-agent/opa/cmd/internal/exec github.com/open-policy-agent/opa/internal/bundle github.com/open-policy-agent/opa/internal/bundle/inspect github.com/open-policy-agent/opa/internal/cidr/merge +github.com/open-policy-agent/opa/internal/compile github.com/open-policy-agent/opa/internal/compiler github.com/open-policy-agent/opa/internal/compiler/wasm github.com/open-policy-agent/opa/internal/compiler/wasm/opa @@ -250,6 +272,7 @@ github.com/open-policy-agent/opa/internal/gojsonschema github.com/open-policy-agent/opa/internal/json/patch github.com/open-policy-agent/opa/internal/lcss github.com/open-policy-agent/opa/internal/leb128 +github.com/open-policy-agent/opa/internal/levenshtein github.com/open-policy-agent/opa/internal/logging github.com/open-policy-agent/opa/internal/merge github.com/open-policy-agent/opa/internal/pathwatcher @@ -267,6 +290,7 @@ github.com/open-policy-agent/opa/internal/runtime/init github.com/open-policy-agent/opa/internal/semver github.com/open-policy-agent/opa/internal/strings github.com/open-policy-agent/opa/internal/strvals +github.com/open-policy-agent/opa/internal/ucast 
github.com/open-policy-agent/opa/internal/uuid github.com/open-policy-agent/opa/internal/version github.com/open-policy-agent/opa/internal/wasm/constant @@ -318,6 +342,7 @@ github.com/open-policy-agent/opa/v1/plugins/status github.com/open-policy-agent/opa/v1/profiler github.com/open-policy-agent/opa/v1/refactor github.com/open-policy-agent/opa/v1/rego +github.com/open-policy-agent/opa/v1/rego/compile github.com/open-policy-agent/opa/v1/repl github.com/open-policy-agent/opa/v1/resolver github.com/open-policy-agent/opa/v1/resolver/wasm @@ -326,6 +351,7 @@ github.com/open-policy-agent/opa/v1/schemas github.com/open-policy-agent/opa/v1/sdk github.com/open-policy-agent/opa/v1/server github.com/open-policy-agent/opa/v1/server/authorizer +github.com/open-policy-agent/opa/v1/server/failtracer github.com/open-policy-agent/opa/v1/server/handlers github.com/open-policy-agent/opa/v1/server/identifier github.com/open-policy-agent/opa/v1/server/types @@ -392,19 +418,19 @@ github.com/prometheus/client_model/go ## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model -# github.com/prometheus/procfs v0.16.1 +# github.com/prometheus/procfs v0.17.0 ## explicit; go 1.23.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 +# github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 ## explicit github.com/rcrowley/go-metrics # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg -# github.com/sagikazarmark/locafero v0.7.0 -## explicit; go 1.21 +# github.com/sagikazarmark/locafero v0.11.0 +## explicit; go 1.23.0 github.com/sagikazarmark/locafero # github.com/segmentio/asm v1.2.0 ## explicit; go 1.18 @@ -418,31 +444,31 @@ github.com/segmentio/asm/internal/unsafebytes # github.com/sergi/go-diff v1.4.0 ## explicit; go 1.13 github.com/sergi/go-diff/diffmatchpatch -# github.com/sirupsen/logrus v1.9.3 +# github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/sourcegraph/conc v0.3.0 -## explicit; go 1.19 +# github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 +## explicit; go 1.20 github.com/sourcegraph/conc -github.com/sourcegraph/conc/internal/multierror -github.com/sourcegraph/conc/iter github.com/sourcegraph/conc/panics -# github.com/spf13/afero v1.12.0 -## explicit; go 1.21 +github.com/sourcegraph/conc/pool +# github.com/spf13/afero v1.15.0 +## explicit; go 1.23.0 github.com/spf13/afero github.com/spf13/afero/internal/common 
github.com/spf13/afero/mem -# github.com/spf13/cast v1.7.1 -## explicit; go 1.19 +# github.com/spf13/cast v1.10.0 +## explicit; go 1.21.0 github.com/spf13/cast -# github.com/spf13/cobra v1.9.1 +github.com/spf13/cast/internal +# github.com/spf13/cobra v1.10.1 ## explicit; go 1.15 github.com/spf13/cobra -# github.com/spf13/pflag v1.0.7 +# github.com/spf13/pflag v1.0.10 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.20.1 -## explicit; go 1.21.0 +# github.com/spf13/viper v1.21.0 +## explicit; go 1.23.0 github.com/spf13/viper github.com/spf13/viper/internal/encoding/dotenv github.com/spf13/viper/internal/encoding/json @@ -485,16 +511,15 @@ go.opentelemetry.io/auto/sdk/internal/telemetry ## explicit; go 1.23.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil # go.opentelemetry.io/contrib/propagators/b3 v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/propagators/b3 -# go.opentelemetry.io/otel v1.37.0 +# go.opentelemetry.io/otel v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -505,37 +530,38 @@ go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/internal -go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.34.0 -go.opentelemetry.io/otel/semconv/v1.34.0/httpconv go.opentelemetry.io/otel/semconv/v1.34.0/rpcconv +go.opentelemetry.io/otel/semconv/v1.37.0 +go.opentelemetry.io/otel/semconv/v1.37.0/httpconv +go.opentelemetry.io/otel/semconv/v1.37.0/otelconv go.opentelemetry.io/otel/semconv/v1.7.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 ## explicit; go 1.23.0 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry -# go.opentelemetry.io/otel/metric v1.37.0 +# go.opentelemetry.io/otel/metric v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.37.0 +# go.opentelemetry.io/otel/sdk v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -543,14 +569,15 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace +go.opentelemetry.io/otel/sdk/trace/internal/x go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.37.0 +# go.opentelemetry.io/otel/trace v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.7.0 +# go.opentelemetry.io/proto/otlp v1.7.1 ## explicit; go 1.23.0 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 @@ -561,12 +588,12 @@ go.opentelemetry.io/proto/otlp/trace/v1 go.uber.org/automaxprocs/internal/cgroups go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/maxprocs -# go.uber.org/multierr v1.11.0 -## explicit; go 1.19 -go.uber.org/multierr # go.yaml.in/yaml/v2 v2.4.2 ## explicit; go 1.15 go.yaml.in/yaml/v2 +# go.yaml.in/yaml/v3 v3.0.4 +## explicit; go 1.16 +go.yaml.in/yaml/v3 # golang.org/x/crypto v0.42.0 ## explicit; go 1.24.0 golang.org/x/crypto/pbkdf2 @@ -617,8 +644,8 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.12.0 -## explicit; go 1.23.0 +# golang.org/x/time v0.13.0 +## explicit; go 1.24.0 golang.org/x/time/rate # golang.org/x/tools v0.37.0 ## explicit; go 1.24.0 @@ -642,10 +669,10 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 +# google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails