diff --git a/go.mod b/go.mod index 79753126..3159f727 100644 --- a/go.mod +++ b/go.mod @@ -3,17 +3,17 @@ module github.com/datarhei/core/v16 go 1.18 require ( - github.com/99designs/gqlgen v0.17.31 + github.com/99designs/gqlgen v0.17.33 github.com/Masterminds/semver/v3 v3.2.1 - github.com/adhocore/gronx v1.1.2 + github.com/adhocore/gronx v1.6.3 github.com/atrox/haikunatorgo/v2 v2.0.1 - github.com/caddyserver/certmagic v0.17.2 - github.com/casbin/casbin/v2 v2.69.1 + github.com/caddyserver/certmagic v0.18.0 + github.com/casbin/casbin/v2 v2.71.0 github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614141756-a25a5fc3c60e - github.com/datarhei/gosrt v0.4.1 + github.com/datarhei/gosrt v0.5.0 github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a github.com/fujiwara/shapeio v1.0.0 - github.com/go-playground/validator/v10 v10.14.0 + github.com/go-playground/validator/v10 v10.14.1 github.com/gobwas/glob v0.2.3 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/google/uuid v1.3.0 @@ -26,19 +26,19 @@ require ( github.com/lestrrat-go/strftime v1.0.6 github.com/lithammer/shortuuid/v4 v4.0.0 github.com/mattn/go-isatty v0.0.19 - github.com/minio/minio-go/v7 v7.0.55 + github.com/minio/minio-go/v7 v7.0.57 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 - github.com/prometheus/client_golang v1.15.1 - github.com/shirou/gopsutil/v3 v3.23.4 + github.com/prometheus/client_golang v1.16.0 + github.com/shirou/gopsutil/v3 v3.23.5 github.com/stretchr/testify v1.8.4 github.com/swaggo/echo-swagger v1.4.0 github.com/swaggo/swag v1.16.1 - github.com/vektah/gqlparser/v2 v2.5.1 + github.com/vektah/gqlparser/v2 v2.5.3 github.com/xeipuuv/gojsonschema v1.2.0 go.etcd.io/bbolt v1.3.7 go.uber.org/automaxprocs v1.5.2 go.uber.org/zap v1.24.0 - golang.org/x/mod v0.10.0 + golang.org/x/mod v0.11.0 ) //replace github.com/datarhei/core-client-go/v16 => ../core-client-go @@ -61,7 +61,7 @@ require ( github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // 
indirect github.com/go-openapi/spec v0.20.9 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect @@ -70,12 +70,12 @@ require ( github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.3 // indirect github.com/iancoleman/orderedmap v0.2.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.5 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/klauspost/compress v1.16.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/labstack/gommon v0.4.0 // indirect github.com/leodido/go-urn v1.2.4 // indirect github.com/libdns/libdns v0.2.1 // indirect @@ -99,11 +99,11 @@ require ( github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/sirupsen/logrus v1.9.2 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/swaggo/files/v2 v2.0.0 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect - github.com/tklauser/numcpus v0.6.0 // indirect - github.com/urfave/cli/v2 v2.24.4 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/urfave/cli/v2 v2.25.5 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect @@ -113,12 +113,12 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.11.0 
// indirect - golang.org/x/crypto v0.9.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/crypto v0.10.0 // indirect + golang.org/x/net v0.11.0 // indirect + golang.org/x/sys v0.9.0 // indirect + golang.org/x/text v0.10.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.9.1 // indirect + golang.org/x/tools v0.10.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index eb3b80be..0a6903f2 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -github.com/99designs/gqlgen v0.17.31 h1:VncSQ82VxieHkea8tz11p7h/zSbvHSxSDZfywqWt158= -github.com/99designs/gqlgen v0.17.31/go.mod h1:i4rEatMrzzu6RXaHydq1nmEPZkb3bKQsnxNRHS4DQB4= +github.com/99designs/gqlgen v0.17.33 h1:VTUpAtElDszatPSe26N0SD0deJCSxb7TZLlUb6JnVRY= +github.com/99designs/gqlgen v0.17.33/go.mod h1:ygDK+m8zGpoQuSh8xoq80UfisR5JTZr7mN57qXlSIZs= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= @@ -8,11 +8,8 @@ github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/adhocore/gronx v1.1.2 
h1:Hgm+d8SyGtn+rCoDkxZq3nLNFLLkzRGR7L2ziRRD1w8= -github.com/adhocore/gronx v1.1.2/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/adhocore/gronx v1.6.3 h1:bnm5vieTrY3QQPpsfB0hrAaeaHDpuZTUC2LLCVMLe9c= +github.com/adhocore/gronx v1.6.3/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -29,7 +26,6 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+ github.com/atrox/haikunatorgo/v2 v2.0.1 h1:FCVx2KL2YvZtI1rI9WeEHxeLRrKGr0Dd4wfCJiUXupc= github.com/atrox/haikunatorgo/v2 v2.0.1/go.mod h1:BBQmx2o+1Z5poziaHRgddAZKOpijwfKdAmMnSYlFK70= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4= github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c/go.mod h1:x1vxHcL/9AVzuk5HOloOEPrtJY0MaalYr78afXZ+pWI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -38,10 +34,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/caddyserver/certmagic v0.17.2 h1:o30seC1T/dBqBCNNGNHWwj2i5/I/FMjBbTAhjADP3nE= -github.com/caddyserver/certmagic v0.17.2/go.mod 
h1:ouWUuC490GOLJzkyN35eXfV8bSbwMwSf4bdhkIxtdQE= -github.com/casbin/casbin/v2 v2.69.1 h1:R3e7uveIRN5Pdqvq0GXEhXmn7HyfoEVjp21/mgEXbdI= -github.com/casbin/casbin/v2 v2.69.1/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/caddyserver/certmagic v0.18.0 h1:L22mJES1WllfLoHUcQUy4wVO7UfOsoL5wtg/Bj7kmIw= +github.com/caddyserver/certmagic v0.18.0/go.mod h1:e0YLTnXIopZ05bBWCLzpIf1Yvk27Q90FGUmGowFRDY8= +github.com/casbin/casbin/v2 v2.71.0 h1:pVzHKXkGgOXIjksEwnrOjNu5CE4xy6aAVzdR8td2gSc= +github.com/casbin/casbin/v2 v2.71.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -50,14 +46,10 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/datarhei/core-client-go/v16 v16.11.1-0.20230605095314-42546fbbbece h1:Gv+W986jLcMa/TOKg5YF3RMDlNDDyj7uHuH+mHP7xq8= -github.com/datarhei/core-client-go/v16 v16.11.1-0.20230605095314-42546fbbbece/go.mod h1:6L0zr/NUwvaPsCTK/IL17m8JUEtgLp3BDtlsBREwacg= -github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614130211-fb0f92af8ac9 h1:ntM1tymajXx92ydwi6RSiDG54aQV3cMOtlGRBT6p9Z8= -github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614130211-fb0f92af8ac9/go.mod h1:6L0zr/NUwvaPsCTK/IL17m8JUEtgLp3BDtlsBREwacg= github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614141756-a25a5fc3c60e h1:iQKqGTyIdCyO7kY/G5MCKhzt3xZ5YPRubbJskVp5EvQ= github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614141756-a25a5fc3c60e/go.mod 
h1:6L0zr/NUwvaPsCTK/IL17m8JUEtgLp3BDtlsBREwacg= -github.com/datarhei/gosrt v0.4.1 h1:08km3wKy72jOdC+JzBDWN57H7xST4mz5lFeJQHuWmMs= -github.com/datarhei/gosrt v0.4.1/go.mod h1:FtsulRiUc67Oi3Ii9JH9aQkpO+ZfgeauRAtIE40mIVA= +github.com/datarhei/gosrt v0.5.0 h1:MhM8kb00nbWc+haNKU7ZdYgSm9pLdxdtas7tcERh8j8= +github.com/datarhei/gosrt v0.5.0/go.mod h1:bcLf0p0FeZl+QY87b+Q8nGkyjyX6IDvI9y9raol8vng= github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a h1:Tf4DSHY1xruBglr+yYP5Wct7czM86GKMYgbXH8a7OFo= github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -79,7 +71,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -93,15 +84,16 @@ github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag 
v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= -github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k= +github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -144,8 +136,8 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= -github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.3 h1:kmRrRLlInXvng0SmLxmQpQkpbYAvcXm7NPDrgxJa9mE= +github.com/hashicorp/golang-lru/v2 v2.0.3/go.mod 
h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.5.0 h1:uNs9EfJ4FwiArZRxxfd/dQ5d33nV31/CdCHArH89hT8= github.com/hashicorp/raft v1.5.0/go.mod h1:pKHB2mf/Y25u3AHNSXVRv+yT+WAnmeTX0BwVppVQV+M= @@ -162,18 +154,16 @@ github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk= +github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.5 
h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -224,8 +214,8 @@ github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.55 h1:ZXqUO/8cgfHzI+08h/zGuTTFpISSA32BZmBE3FCLJas= -github.com/minio/minio-go/v7 v7.0.55/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE= +github.com/minio/minio-go/v7 v7.0.57 h1:xsFiOiWjpC1XAGbFEUOzj1/gMXGz7ljfxifwcb/5YXU= +github.com/minio/minio-go/v7 v7.0.57/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -238,7 +228,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty 
v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -259,8 +248,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -282,20 +271,18 @@ github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v3 v3.23.4 
h1:hZwmDxZs7Ewt75DV81r4pFMqbq+di2cbt9FsQBqLD2o= -github.com/shirou/gopsutil/v3 v3.23.4/go.mod h1:ZcGxyfzAMRevhUR2+cfhXDH6gQdFYE/t8j1nsU4mPI8= -github.com/shoenig/go-m1cpu v0.1.5/go.mod h1:Wwvst4LR89UxjeFtLRMrpgRiyY4xPsejnVZym39dbAQ= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shirou/gopsutil/v3 v3.23.5 h1:5SgDCeQ0KW0S4N0znjeM/eFHXXOKyv2dVNgRq/c9P6Y= +github.com/shirou/gopsutil/v3 v3.23.5/go.mod h1:Ng3Maa27Q2KARVJ0SPZF5NdrQSC3XHKP8IIWrHgMeLY= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.3/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= -github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -311,6 +298,7 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F 
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/swaggo/echo-swagger v1.4.0 h1:RCxLKySw1SceHLqnmc41pKyiIeE+OiD7NSI7FUOBlLo= @@ -321,18 +309,19 @@ github.com/swaggo/swag v1.16.1 h1:fTNRhKstPKxcnoKsytm4sahr8FaYzUcT7i1/3nd/fBg= github.com/swaggo/swag v1.16.1/go.mod h1:9/LMvHycG3NFHfR6LwvikHv5iFvmPADQ359cKikGxto= github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/urfave/cli/v2 v2.24.4 h1:0gyJJEBYtCV87zI/x2nZCPyDxD51K6xM8SkwjHFCNEU= -github.com/urfave/cli/v2 v2.24.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/cli/v2 v2.25.5 h1:d0NIAyhh5shGscroL7ek/Ya9QYQE0KNabJgiUinIQkc= +github.com/urfave/cli/v2 v2.25.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= 
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vektah/gqlparser/v2 v2.5.1 h1:ZGu+bquAY23jsxDRcYpWjttRZrUz07LbiY77gUOHcr4= -github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs= +github.com/vektah/gqlparser/v2 v2.5.3 h1:goUwv4+blhtwR3GwefadPVI4ubYc/WZSypljWMQa6IE= +github.com/vektah/gqlparser/v2 v2.5.3/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -343,7 +332,6 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -362,12 +350,12 @@ go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -375,14 +363,14 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= 
+golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -401,19 +389,19 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -422,8 +410,8 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= +golang.org/x/tools v0.10.0/go.mod 
h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -445,7 +433,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/http/graph/graph/graph.go b/http/graph/graph/graph.go index 57d0564e..6db1ccba 100644 --- a/http/graph/graph/graph.go +++ b/http/graph/graph/graph.go @@ -304,7 +304,7 @@ func (e *executableSchema) Schema() *ast.Schema { } func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) { - ec := executionContext{nil, e} + ec := executionContext{nil, e, 0, 0, nil} _ = ec switch typeName + "." 
+ field { @@ -1480,7 +1480,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { rc := graphql.GetOperationContext(ctx) - ec := executionContext{rc, e} + ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)} inputUnmarshalMap := graphql.BuildUnmarshalerMap( ec.unmarshalInputMetricInput, ec.unmarshalInputMetricsInput, @@ -1490,18 +1490,33 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { switch rc.Operation.Operation { case ast.Query: return func(ctx context.Context) *graphql.Response { - if !first { - return nil + var response graphql.Response + var data graphql.Marshaler + if first { + first = false + ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) + data = ec._Query(ctx, rc.Operation.SelectionSet) + } else { + if atomic.LoadInt32(&ec.pendingDeferred) > 0 { + result := <-ec.deferredResults + atomic.AddInt32(&ec.pendingDeferred, -1) + data = result.Result + response.Path = result.Path + response.Label = result.Label + response.Errors = result.Errors + } else { + return nil + } } - first = false - ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) - data := ec._Query(ctx, rc.Operation.SelectionSet) var buf bytes.Buffer data.MarshalGQL(&buf) - - return &graphql.Response{ - Data: buf.Bytes(), + response.Data = buf.Bytes() + if atomic.LoadInt32(&ec.deferred) > 0 { + hasNext := atomic.LoadInt32(&ec.pendingDeferred) > 0 + response.HasNext = &hasNext } + + return &response } case ast.Mutation: return func(ctx context.Context) *graphql.Response { @@ -1527,6 +1542,28 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { type executionContext struct { *graphql.OperationContext *executableSchema + deferred int32 + pendingDeferred int32 + deferredResults chan graphql.DeferredResult +} + +func (ec *executionContext) processDeferredGroup(dg graphql.DeferredGroup) { + 
atomic.AddInt32(&ec.pendingDeferred, 1) + go func() { + ctx := graphql.WithFreshResponseContext(dg.Context) + dg.FieldSet.Dispatch(ctx) + ds := graphql.DeferredResult{ + Path: dg.Path, + Label: dg.Label, + Result: dg.FieldSet, + Errors: graphql.GetErrors(ctx), + } + // null fields should bubble up + if dg.FieldSet.Invalids > 0 { + ds.Result = graphql.Null + } + ec.deferredResults <- ds + }() } func (ec *executionContext) introspectSchema() (*introspection.Schema, error) { @@ -8337,7 +8374,7 @@ func (ec *executionContext) fieldContext_Query_metrics(ctx context.Context, fiel ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_metrics_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -8421,7 +8458,7 @@ func (ec *executionContext) fieldContext_Query_playoutStatus(ctx context.Context ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_playoutStatus_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -8498,7 +8535,7 @@ func (ec *executionContext) fieldContext_Query_processes(ctx context.Context, fi ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_processes_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -8572,7 +8609,7 @@ func (ec *executionContext) fieldContext_Query_process(ctx context.Context, fiel ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_process_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -8633,7 +8670,7 @@ func (ec *executionContext) fieldContext_Query_probe(ctx context.Context, field ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query_probe_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return 
fc, nil } @@ -8707,7 +8744,7 @@ func (ec *executionContext) fieldContext_Query___type(ctx context.Context, field ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field_Query___type_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -11227,7 +11264,7 @@ func (ec *executionContext) fieldContext___Type_fields(ctx context.Context, fiel ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field___Type_fields_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -11415,7 +11452,7 @@ func (ec *executionContext) fieldContext___Type_enumValues(ctx context.Context, ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.field___Type_enumValues_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) - return + return fc, err } return fc, nil } @@ -11597,18 +11634,20 @@ func (ec *executionContext) unmarshalInputMetricInput(ctx context.Context, obj i var err error ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) - it.Name, err = ec.unmarshalNString2string(ctx, v) + data, err := ec.unmarshalNString2string(ctx, v) if err != nil { return it, err } + it.Name = data case "labels": var err error ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("labels")) - it.Labels, err = ec.unmarshalOMap2map(ctx, v) + data, err := ec.unmarshalOMap2map(ctx, v) if err != nil { return it, err } + it.Labels = data } } @@ -11633,26 +11672,29 @@ func (ec *executionContext) unmarshalInputMetricsInput(ctx context.Context, obj var err error ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("timerange_seconds")) - it.TimerangeSeconds, err = ec.unmarshalOInt2ᚖint(ctx, v) + data, err := ec.unmarshalOInt2ᚖint(ctx, v) if err != nil { return it, err } + it.TimerangeSeconds = data case "interval_seconds": var err error ctx := graphql.WithPathContext(ctx, 
graphql.NewPathWithField("interval_seconds")) - it.IntervalSeconds, err = ec.unmarshalOInt2ᚖint(ctx, v) + data, err := ec.unmarshalOInt2ᚖint(ctx, v) if err != nil { return it, err } + it.IntervalSeconds = data case "metrics": var err error ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) - it.Metrics, err = ec.unmarshalNMetricInput2ᚕᚖgithubᚗcomᚋdatarheiᚋcoreᚋv16ᚋhttpᚋgraphᚋmodelsᚐMetricInputᚄ(ctx, v) + data, err := ec.unmarshalNMetricInput2ᚕᚖgithubᚗcomᚋdatarheiᚋcoreᚋv16ᚋhttpᚋgraphᚋmodelsᚐMetricInputᚄ(ctx, v) if err != nil { return it, err } + it.Metrics = data } } @@ -11694,90 +11736,83 @@ var aVStreamImplementors = []string{"AVStream"} func (ec *executionContext) _AVStream(ctx context.Context, sel ast.SelectionSet, obj *models.AVStream) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, aVStreamImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("AVStream") case "input": - out.Values[i] = ec._AVStream_input(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "output": - out.Values[i] = ec._AVStream_output(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "aqueue": - out.Values[i] = ec._AVStream_aqueue(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "queue": - out.Values[i] = ec._AVStream_queue(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "dup": - out.Values[i] = ec._AVStream_dup(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "drop": - out.Values[i] = ec._AVStream_drop(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "enc": - out.Values[i] = ec._AVStream_enc(ctx, field, obj) - if 
out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "looping": - out.Values[i] = ec._AVStream_looping(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "duplicating": - out.Values[i] = ec._AVStream_duplicating(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "gop": - out.Values[i] = ec._AVStream_gop(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -11785,48 +11820,53 @@ var aVStreamIOImplementors = []string{"AVStreamIO"} func (ec *executionContext) _AVStreamIO(ctx context.Context, sel ast.SelectionSet, obj *models.AVStreamIo) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, aVStreamIOImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("AVStreamIO") case "state": - out.Values[i] = ec._AVStreamIO_state(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "packet": - out.Values[i] = ec._AVStreamIO_packet(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "time": - out.Values[i] = ec._AVStreamIO_time(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "size_kb": - out.Values[i] = ec._AVStreamIO_size_kb(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: 
panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -11834,62 +11874,63 @@ var aboutImplementors = []string{"About"} func (ec *executionContext) _About(ctx context.Context, sel ast.SelectionSet, obj *models.About) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, aboutImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("About") case "app": - out.Values[i] = ec._About_app(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "id": - out.Values[i] = ec._About_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "name": - out.Values[i] = ec._About_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "created_at": - out.Values[i] = ec._About_created_at(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "uptime_seconds": - out.Values[i] = ec._About_uptime_seconds(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "version": - out.Values[i] = ec._About_version(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + 
ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -11897,62 +11938,63 @@ var aboutVersionImplementors = []string{"AboutVersion"} func (ec *executionContext) _AboutVersion(ctx context.Context, sel ast.SelectionSet, obj *models.AboutVersion) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, aboutVersionImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("AboutVersion") case "number": - out.Values[i] = ec._AboutVersion_number(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "repository_commit": - out.Values[i] = ec._AboutVersion_repository_commit(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "repository_branch": - out.Values[i] = ec._AboutVersion_repository_branch(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "build_date": - out.Values[i] = ec._AboutVersion_build_date(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "arch": - out.Values[i] = ec._AboutVersion_arch(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "compiler": - out.Values[i] = ec._AboutVersion_compiler(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return 
out } @@ -11960,38 +12002,45 @@ var metricImplementors = []string{"Metric"} func (ec *executionContext) _Metric(ctx context.Context, sel ast.SelectionSet, obj *models.Metric) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, metricImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Metric") case "name": - out.Values[i] = ec._Metric_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "labels": - out.Values[i] = ec._Metric_labels(ctx, field, obj) - case "values": - out.Values[i] = ec._Metric_values(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -11999,35 +12048,42 @@ var metricsImplementors = []string{"Metrics"} func (ec *executionContext) _Metrics(ctx context.Context, sel ast.SelectionSet, obj *models.Metrics) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, metricsImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Metrics") case "timerange_seconds": - out.Values[i] = ec._Metrics_timerange_seconds(ctx, field, obj) - case "interval_seconds": - out.Values[i] = ec._Metrics_interval_seconds(ctx, field, obj) - case "metrics": - out.Values[i] = ec._Metrics_metrics(ctx, 
field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12040,7 +12096,7 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) }) out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{ Object: field.Name, @@ -12051,22 +12107,32 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) case "__typename": out.Values[i] = graphql.MarshalString("Mutation") case "ping": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_ping(ctx, field) }) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12074,34 +12140,43 @@ var probeImplementors = []string{"Probe"} func (ec *executionContext) _Probe(ctx context.Context, sel ast.SelectionSet, obj *models.Probe) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, probeImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + 
deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Probe") case "streams": - out.Values[i] = ec._Probe_streams(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "log": - out.Values[i] = ec._Probe_log(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12109,132 +12184,113 @@ var probeIOImplementors = []string{"ProbeIO"} func (ec *executionContext) _ProbeIO(ctx context.Context, sel ast.SelectionSet, obj *models.ProbeIo) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, probeIOImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("ProbeIO") case "url": - out.Values[i] = ec._ProbeIO_url(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "index": - out.Values[i] = ec._ProbeIO_index(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "stream": - out.Values[i] = ec._ProbeIO_stream(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "language": - out.Values[i] = ec._ProbeIO_language(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "type": - out.Values[i] = ec._ProbeIO_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - 
invalids++ + out.Invalids++ } case "codec": - out.Values[i] = ec._ProbeIO_codec(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "coder": - out.Values[i] = ec._ProbeIO_coder(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "bitrate_kbps": - out.Values[i] = ec._ProbeIO_bitrate_kbps(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "duration_seconds": - out.Values[i] = ec._ProbeIO_duration_seconds(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "fps": - out.Values[i] = ec._ProbeIO_fps(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "pix_fmt": - out.Values[i] = ec._ProbeIO_pix_fmt(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "width": - out.Values[i] = ec._ProbeIO_width(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "height": - out.Values[i] = ec._ProbeIO_height(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "sampling": - out.Values[i] = ec._ProbeIO_sampling(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "layout": - out.Values[i] = ec._ProbeIO_layout(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "channels": - out.Values[i] = ec._ProbeIO_channels(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ 
-12242,87 +12298,80 @@ var processImplementors = []string{"Process"} func (ec *executionContext) _Process(ctx context.Context, sel ast.SelectionSet, obj *models.Process) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, processImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Process") case "id": - out.Values[i] = ec._Process_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "owner": - out.Values[i] = ec._Process_owner(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "domain": - out.Values[i] = ec._Process_domain(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "type": - out.Values[i] = ec._Process_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "reference": - out.Values[i] = ec._Process_reference(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "created_at": - out.Values[i] = ec._Process_created_at(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "config": - out.Values[i] = ec._Process_config(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "state": - out.Values[i] = ec._Process_state(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "report": - out.Values[i] = ec._Process_report(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "metadata": - out.Values[i] = ec._Process_metadata(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + 
atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12330,111 +12379,98 @@ var processConfigImplementors = []string{"ProcessConfig"} func (ec *executionContext) _ProcessConfig(ctx context.Context, sel ast.SelectionSet, obj *models.ProcessConfig) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, processConfigImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("ProcessConfig") case "id": - out.Values[i] = ec._ProcessConfig_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "owner": - out.Values[i] = ec._ProcessConfig_owner(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "domain": - out.Values[i] = ec._ProcessConfig_domain(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "type": - out.Values[i] = ec._ProcessConfig_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "reference": - out.Values[i] = ec._ProcessConfig_reference(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "input": - out.Values[i] = ec._ProcessConfig_input(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "output": - out.Values[i] = ec._ProcessConfig_output(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "options": - out.Values[i] = ec._ProcessConfig_options(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "reconnect": - out.Values[i] = ec._ProcessConfig_reconnect(ctx, field, 
obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "reconnect_delay_seconds": - out.Values[i] = ec._ProcessConfig_reconnect_delay_seconds(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "autostart": - out.Values[i] = ec._ProcessConfig_autostart(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "stale_timeout_seconds": - out.Values[i] = ec._ProcessConfig_stale_timeout_seconds(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "limits": - out.Values[i] = ec._ProcessConfig_limits(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12442,41 +12478,48 @@ var processConfigIOImplementors = []string{"ProcessConfigIO"} func (ec *executionContext) _ProcessConfigIO(ctx context.Context, sel ast.SelectionSet, obj *models.ProcessConfigIo) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, processConfigIOImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("ProcessConfigIO") case "id": - out.Values[i] = ec._ProcessConfigIO_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "address": - out.Values[i] = ec._ProcessConfigIO_address(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "options": - out.Values[i] 
= ec._ProcessConfigIO_options(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12484,41 +12527,48 @@ var processConfigLimitsImplementors = []string{"ProcessConfigLimits"} func (ec *executionContext) _ProcessConfigLimits(ctx context.Context, sel ast.SelectionSet, obj *models.ProcessConfigLimits) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, processConfigLimitsImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("ProcessConfigLimits") case "cpu_usage": - out.Values[i] = ec._ProcessConfigLimits_cpu_usage(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "memory_bytes": - out.Values[i] = ec._ProcessConfigLimits_memory_bytes(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "waitfor_seconds": - out.Values[i] = ec._ProcessConfigLimits_waitfor_seconds(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return 
out } @@ -12526,48 +12576,53 @@ var processReportImplementors = []string{"ProcessReport", "IProcessReportHistory func (ec *executionContext) _ProcessReport(ctx context.Context, sel ast.SelectionSet, obj *models.ProcessReport) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, processReportImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("ProcessReport") case "created_at": - out.Values[i] = ec._ProcessReport_created_at(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "prelude": - out.Values[i] = ec._ProcessReport_prelude(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "log": - out.Values[i] = ec._ProcessReport_log(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "history": - out.Values[i] = ec._ProcessReport_history(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12575,41 +12630,48 @@ var processReportHistoryEntryImplementors = []string{"ProcessReportHistoryEntry" func (ec *executionContext) _ProcessReportHistoryEntry(ctx context.Context, sel ast.SelectionSet, obj *models.ProcessReportHistoryEntry) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, processReportHistoryEntryImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := 
make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("ProcessReportHistoryEntry") case "created_at": - out.Values[i] = ec._ProcessReportHistoryEntry_created_at(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "prelude": - out.Values[i] = ec._ProcessReportHistoryEntry_prelude(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "log": - out.Values[i] = ec._ProcessReportHistoryEntry_log(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12617,34 +12679,43 @@ var processReportLogEntryImplementors = []string{"ProcessReportLogEntry"} func (ec *executionContext) _ProcessReportLogEntry(ctx context.Context, sel ast.SelectionSet, obj *models.ProcessReportLogEntry) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, processReportLogEntryImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("ProcessReportLogEntry") case "timestamp": - out.Values[i] = ec._ProcessReportLogEntry_timestamp(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "data": - out.Values[i] = ec._ProcessReportLogEntry_data(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + 
strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12652,83 +12723,78 @@ var processStateImplementors = []string{"ProcessState"} func (ec *executionContext) _ProcessState(ctx context.Context, sel ast.SelectionSet, obj *models.ProcessState) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, processStateImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("ProcessState") case "order": - out.Values[i] = ec._ProcessState_order(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "state": - out.Values[i] = ec._ProcessState_state(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "runtime_seconds": - out.Values[i] = ec._ProcessState_runtime_seconds(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "reconnect_seconds": - out.Values[i] = ec._ProcessState_reconnect_seconds(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "last_logline": - out.Values[i] = ec._ProcessState_last_logline(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "progress": - out.Values[i] = ec._ProcessState_progress(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "memory_bytes": - out.Values[i] = ec._ProcessState_memory_bytes(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "cpu_usage": - 
out.Values[i] = ec._ProcessState_cpu_usage(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "command": - out.Values[i] = ec._ProcessState_command(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12736,104 +12802,93 @@ var progressImplementors = []string{"Progress"} func (ec *executionContext) _Progress(ctx context.Context, sel ast.SelectionSet, obj *models.Progress) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, progressImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Progress") case "input": - out.Values[i] = ec._Progress_input(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "output": - out.Values[i] = ec._Progress_output(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "frame": - out.Values[i] = ec._Progress_frame(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "packet": - out.Values[i] = ec._Progress_packet(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "fps": - out.Values[i] = ec._Progress_fps(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "q": - out.Values[i] = ec._Progress_q(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + 
out.Invalids++ } case "size_kb": - out.Values[i] = ec._Progress_size_kb(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "time": - out.Values[i] = ec._Progress_time(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "bitrate_kbit": - out.Values[i] = ec._Progress_bitrate_kbit(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "speed": - out.Values[i] = ec._Progress_speed(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "drop": - out.Values[i] = ec._Progress_drop(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "dup": - out.Values[i] = ec._Progress_dup(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -12841,171 +12896,140 @@ var progressIOImplementors = []string{"ProgressIO"} func (ec *executionContext) _ProgressIO(ctx context.Context, sel ast.SelectionSet, obj *models.ProgressIo) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, progressIOImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("ProgressIO") case "id": - out.Values[i] = ec._ProgressIO_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "address": - out.Values[i] = ec._ProgressIO_address(ctx, field, obj) - if 
out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "index": - out.Values[i] = ec._ProgressIO_index(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "stream": - out.Values[i] = ec._ProgressIO_stream(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "format": - out.Values[i] = ec._ProgressIO_format(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "type": - out.Values[i] = ec._ProgressIO_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "codec": - out.Values[i] = ec._ProgressIO_codec(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "coder": - out.Values[i] = ec._ProgressIO_coder(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "frame": - out.Values[i] = ec._ProgressIO_frame(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "fps": - out.Values[i] = ec._ProgressIO_fps(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "packet": - out.Values[i] = ec._ProgressIO_packet(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "pps": - out.Values[i] = ec._ProgressIO_pps(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "size_kb": - out.Values[i] = ec._ProgressIO_size_kb(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "bitrate_kbit": - out.Values[i] = ec._ProgressIO_bitrate_kbit(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "pixfmt": - out.Values[i] = ec._ProgressIO_pixfmt(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "q": - out.Values[i] = ec._ProgressIO_q(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ 
+ out.Invalids++ } case "width": - out.Values[i] = ec._ProgressIO_width(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "height": - out.Values[i] = ec._ProgressIO_height(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "sampling": - out.Values[i] = ec._ProgressIO_sampling(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "layout": - out.Values[i] = ec._ProgressIO_layout(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "channels": - out.Values[i] = ec._ProgressIO_channels(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "avstream": - out.Values[i] = ec._ProgressIO_avstream(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13018,7 +13042,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr }) out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{ Object: field.Name, @@ -13031,7 +13055,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr case "ping": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13039,22 +13063,21 @@ func (ec *executionContext) _Query(ctx 
context.Context, sel ast.SelectionSet) gr }() res = ec._Query_ping(ctx, field) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "about": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13065,16 +13088,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "log": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13082,22 +13104,21 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr }() res = ec._Query_log(ctx, field) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } rrm := func(ctx context.Context) graphql.Marshaler { - return 
ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "metrics": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13105,22 +13126,21 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr }() res = ec._Query_metrics(ctx, field) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "playoutStatus": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13131,16 +13151,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler 
{ - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "processes": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13148,22 +13167,21 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr }() res = ec._Query_processes(ctx, field) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "process": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13174,16 +13192,15 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "probe": field := field - innerFunc := func(ctx context.Context) (res graphql.Marshaler) { + innerFunc := func(ctx 
context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13191,38 +13208,45 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr }() res = ec._Query_probe(ctx, field) if res == graphql.Null { - atomic.AddUint32(&invalids, 1) + atomic.AddUint32(&fs.Invalids, 1) } return res } rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc) + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func() graphql.Marshaler { - return rrm(innerCtx) - }) + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "__type": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Query___type(ctx, field) }) - case "__schema": - out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Query___schema(ctx, field) }) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13230,122 +13254,105 @@ var rawAVstreamImplementors = []string{"RawAVstream"} func (ec *executionContext) _RawAVstream(ctx context.Context, sel ast.SelectionSet, obj *models.RawAVstream) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, rawAVstreamImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := 
make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("RawAVstream") case "id": - out.Values[i] = ec._RawAVstream_id(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "url": - out.Values[i] = ec._RawAVstream_url(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "stream": - out.Values[i] = ec._RawAVstream_stream(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "queue": - out.Values[i] = ec._RawAVstream_queue(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "aqueue": - out.Values[i] = ec._RawAVstream_aqueue(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "dup": - out.Values[i] = ec._RawAVstream_dup(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "drop": - out.Values[i] = ec._RawAVstream_drop(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "enc": - out.Values[i] = ec._RawAVstream_enc(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "looping": - out.Values[i] = ec._RawAVstream_looping(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "duplicating": - out.Values[i] = ec._RawAVstream_duplicating(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "gop": - out.Values[i] = ec._RawAVstream_gop(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "debug": - out.Values[i] = ec._RawAVstream_debug(ctx, field, obj) - case "input": - out.Values[i] = ec._RawAVstream_input(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "output": - out.Values[i] = ec._RawAVstream_output(ctx, field, obj) - if out.Values[i] 
== graphql.Null { - invalids++ + out.Invalids++ } case "swap": - out.Values[i] = ec._RawAVstream_swap(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13353,48 +13360,53 @@ var rawAVstreamIOImplementors = []string{"RawAVstreamIO"} func (ec *executionContext) _RawAVstreamIO(ctx context.Context, sel ast.SelectionSet, obj *models.RawAVstreamIo) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, rawAVstreamIOImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("RawAVstreamIO") case "state": - out.Values[i] = ec._RawAVstreamIO_state(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "packet": - out.Values[i] = ec._RawAVstreamIO_packet(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "time": - out.Values[i] = ec._RawAVstreamIO_time(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "size_kb": - out.Values[i] = ec._RawAVstreamIO_size_kb(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + 
ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13402,48 +13414,53 @@ var rawAVstreamSwapImplementors = []string{"RawAVstreamSwap"} func (ec *executionContext) _RawAVstreamSwap(ctx context.Context, sel ast.SelectionSet, obj *models.RawAVstreamSwap) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, rawAVstreamSwapImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("RawAVstreamSwap") case "url": - out.Values[i] = ec._RawAVstreamSwap_url(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "status": - out.Values[i] = ec._RawAVstreamSwap_status(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "lasturl": - out.Values[i] = ec._RawAVstreamSwap_lasturl(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "lasterror": - out.Values[i] = ec._RawAVstreamSwap_lasterror(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13451,52 +13468,55 @@ var __DirectiveImplementors = []string{"__Directive"} func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __DirectiveImplementors) + 
out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Directive") case "name": - out.Values[i] = ec.___Directive_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "description": - out.Values[i] = ec.___Directive_description(ctx, field, obj) - case "locations": - out.Values[i] = ec.___Directive_locations(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "args": - out.Values[i] = ec.___Directive_args(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "isRepeatable": - out.Values[i] = ec.___Directive_isRepeatable(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13504,42 +13524,47 @@ var __EnumValueImplementors = []string{"__EnumValue"} func (ec *executionContext) ___EnumValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.EnumValue) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __EnumValueImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__EnumValue") case "name": - out.Values[i] = ec.___EnumValue_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "description": - 
out.Values[i] = ec.___EnumValue_description(ctx, field, obj) - case "isDeprecated": - out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "deprecationReason": - out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13547,56 +13572,57 @@ var __FieldImplementors = []string{"__Field"} func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __FieldImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Field") case "name": - out.Values[i] = ec.___Field_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "description": - out.Values[i] = ec.___Field_description(ctx, field, obj) - case "args": - out.Values[i] = ec.___Field_args(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "type": - out.Values[i] = ec.___Field_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "isDeprecated": - out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "deprecationReason": - out.Values[i] = ec.___Field_deprecationReason(ctx, field, obj) - default: panic("unknown field " + 
strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13604,42 +13630,47 @@ var __InputValueImplementors = []string{"__InputValue"} func (ec *executionContext) ___InputValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.InputValue) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __InputValueImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__InputValue") case "name": - out.Values[i] = ec.___InputValue_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "description": - out.Values[i] = ec.___InputValue_description(ctx, field, obj) - case "type": - out.Values[i] = ec.___InputValue_type(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "defaultValue": - out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13647,53 +13678,54 @@ var __SchemaImplementors = []string{"__Schema"} func (ec *executionContext) ___Schema(ctx context.Context, sel ast.SelectionSet, obj *introspection.Schema) graphql.Marshaler { fields := 
graphql.CollectFields(ec.OperationContext, sel, __SchemaImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Schema") case "description": - out.Values[i] = ec.___Schema_description(ctx, field, obj) - case "types": - out.Values[i] = ec.___Schema_types(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "queryType": - out.Values[i] = ec.___Schema_queryType(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } case "mutationType": - out.Values[i] = ec.___Schema_mutationType(ctx, field, obj) - case "subscriptionType": - out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj) - case "directives": - out.Values[i] = ec.___Schema_directives(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + out.Invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } @@ -13701,63 +13733,56 @@ var __TypeImplementors = []string{"__Type"} func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, obj *introspection.Type) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, __TypeImplementors) + out := graphql.NewFieldSet(fields) - var invalids uint32 + deferred := make(map[string]*graphql.FieldSet) for i, field := range fields { switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Type") case "kind": - out.Values[i] = ec.___Type_kind(ctx, field, obj) - if out.Values[i] == graphql.Null { 
- invalids++ + out.Invalids++ } case "name": - out.Values[i] = ec.___Type_name(ctx, field, obj) - case "description": - out.Values[i] = ec.___Type_description(ctx, field, obj) - case "fields": - out.Values[i] = ec.___Type_fields(ctx, field, obj) - case "interfaces": - out.Values[i] = ec.___Type_interfaces(ctx, field, obj) - case "possibleTypes": - out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj) - case "enumValues": - out.Values[i] = ec.___Type_enumValues(ctx, field, obj) - case "inputFields": - out.Values[i] = ec.___Type_inputFields(ctx, field, obj) - case "ofType": - out.Values[i] = ec.___Type_ofType(ctx, field, obj) - case "specifiedByURL": - out.Values[i] = ec.___Type_specifiedByURL(ctx, field, obj) - default: panic("unknown field " + strconv.Quote(field.Name)) } } - out.Dispatch() - if invalids > 0 { + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + return out } diff --git a/http/graph/models/models_gen.go b/http/graph/models/models_gen.go index 56bb286b..64fae4f7 100644 --- a/http/graph/models/models_gen.go +++ b/http/graph/models/models_gen.go @@ -58,24 +58,24 @@ type AboutVersion struct { type Metric struct { Name string `json:"name"` - Labels map[string]interface{} `json:"labels"` + Labels map[string]interface{} `json:"labels,omitempty"` Values []*scalars.MetricsResponseValue `json:"values"` } type MetricInput struct { Name string `json:"name"` - Labels map[string]interface{} `json:"labels"` + Labels map[string]interface{} `json:"labels,omitempty"` } type Metrics struct { - TimerangeSeconds *int `json:"timerange_seconds"` - IntervalSeconds *int `json:"interval_seconds"` + TimerangeSeconds *int `json:"timerange_seconds,omitempty"` + IntervalSeconds *int `json:"interval_seconds,omitempty"` Metrics 
[]*Metric `json:"metrics"` } type MetricsInput struct { - TimerangeSeconds *int `json:"timerange_seconds"` - IntervalSeconds *int `json:"interval_seconds"` + TimerangeSeconds *int `json:"timerange_seconds,omitempty"` + IntervalSeconds *int `json:"interval_seconds,omitempty"` Metrics []*MetricInput `json:"metrics"` } @@ -113,7 +113,7 @@ type Process struct { Config *ProcessConfig `json:"config"` State *ProcessState `json:"state"` Report *ProcessReport `json:"report"` - Metadata map[string]interface{} `json:"metadata"` + Metadata map[string]interface{} `json:"metadata,omitempty"` } type ProcessConfig struct { @@ -257,7 +257,7 @@ type ProgressIo struct { Sampling scalars.Uint64 `json:"sampling"` Layout string `json:"layout"` Channels scalars.Uint64 `json:"channels"` - Avstream *AVStream `json:"avstream"` + Avstream *AVStream `json:"avstream,omitempty"` } type RawAVstream struct { @@ -272,7 +272,7 @@ type RawAVstream struct { Looping bool `json:"looping"` Duplicating bool `json:"duplicating"` Gop string `json:"gop"` - Debug interface{} `json:"debug"` + Debug interface{} `json:"debug,omitempty"` Input *RawAVstreamIo `json:"input"` Output *RawAVstreamIo `json:"output"` Swap *RawAVstreamSwap `json:"swap"` @@ -341,17 +341,17 @@ type State string const ( StateRunning State = "RUNNING" - StateIDLe State = "IDLE" + StateIdle State = "IDLE" ) var AllState = []State{ StateRunning, - StateIDLe, + StateIdle, } func (e State) IsValid() bool { switch e { - case StateRunning, StateIDLe: + case StateRunning, StateIdle: return true } return false diff --git a/http/graph/resolver/about.resolvers.go b/http/graph/resolver/about.resolvers.go index d62771b5..266c7fab 100644 --- a/http/graph/resolver/about.resolvers.go +++ b/http/graph/resolver/about.resolvers.go @@ -2,6 +2,7 @@ package resolver // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. 
+// Code generated by github.com/99designs/gqlgen version v0.17.33 import ( "context" diff --git a/http/graph/resolver/log.resolvers.go b/http/graph/resolver/log.resolvers.go index 18aa2281..2e29633b 100644 --- a/http/graph/resolver/log.resolvers.go +++ b/http/graph/resolver/log.resolvers.go @@ -2,6 +2,7 @@ package resolver // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. +// Code generated by github.com/99designs/gqlgen version v0.17.33 import ( "context" diff --git a/http/graph/resolver/metrics.resolvers.go b/http/graph/resolver/metrics.resolvers.go index e9ae7d3e..704ba664 100644 --- a/http/graph/resolver/metrics.resolvers.go +++ b/http/graph/resolver/metrics.resolvers.go @@ -2,6 +2,7 @@ package resolver // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. +// Code generated by github.com/99designs/gqlgen version v0.17.33 import ( "context" diff --git a/http/graph/resolver/playout.resolvers.go b/http/graph/resolver/playout.resolvers.go index 6c93ed62..d0b77740 100644 --- a/http/graph/resolver/playout.resolvers.go +++ b/http/graph/resolver/playout.resolvers.go @@ -2,6 +2,7 @@ package resolver // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. 
+// Code generated by github.com/99designs/gqlgen version v0.17.33 import ( "context" diff --git a/http/graph/resolver/process.resolvers.go b/http/graph/resolver/process.resolvers.go index 2691eeaa..e71b51a0 100644 --- a/http/graph/resolver/process.resolvers.go +++ b/http/graph/resolver/process.resolvers.go @@ -2,6 +2,7 @@ package resolver // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. +// Code generated by github.com/99designs/gqlgen version v0.17.33 import ( "context" diff --git a/http/graph/resolver/schema.resolvers.go b/http/graph/resolver/schema.resolvers.go index 89a0b922..bb30e9e8 100644 --- a/http/graph/resolver/schema.resolvers.go +++ b/http/graph/resolver/schema.resolvers.go @@ -2,6 +2,7 @@ package resolver // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. 
+// Code generated by github.com/99designs/gqlgen version v0.17.33 import ( "context" diff --git a/iam/identity/identity.go b/iam/identity/identity.go index 4fadf4e7..57ba5710 100644 --- a/iam/identity/identity.go +++ b/iam/identity/identity.go @@ -50,7 +50,7 @@ func (u *User) validate() error { return fmt.Errorf("a name is required") } - chars := `A-Za-z0-9:_-` + chars := `A-Za-z0-9._-` re := regexp.MustCompile(`[^` + chars + `]`) if re.MatchString(u.Name) { diff --git a/rtmp/rtmp.go b/rtmp/rtmp.go index cfdcc7d2..be349b16 100644 --- a/rtmp/rtmp.go +++ b/rtmp/rtmp.go @@ -196,13 +196,13 @@ func (s *server) Channels() []string { return channels } -func (s *server) log(who, what, action, path, message string, client net.Addr) { +func (s *server) log(who, handler, action, resource, message string, client net.Addr) { s.logger.Info().WithFields(log.Fields{ - "who": who, - "what": what, - "action": action, - "path": path, - "client": client.String(), + "who": who, + "handler": handler, + "action": action, + "resource": resource, + "client": client.String(), }).Log(message) } @@ -258,7 +258,7 @@ func (s *server) handlePlay(conn *rtmp.Conn) { identity, err := s.findIdentityFromStreamKey(token) if err != nil { s.logger.Debug().WithError(err).Log("invalid streamkey") - s.log(identity, "PLAY", "FORBIDDEN", playpath, "invalid streamkey ("+token+")", remote) + s.log("", "PLAY", "FORBIDDEN", playpath, "invalid streamkey ("+token+")", remote) return } @@ -476,13 +476,13 @@ func (s *server) findIdentityFromStreamKey(key string) (string, error) { var token string - elements := strings.Split(key, ":") - if len(elements) == 1 { + before, after, found := strings.Cut(key, ":") + if !found { identity = s.iam.GetDefaultVerifier() - token = elements[0] + token = before } else { - identity, err = s.iam.GetVerifier(elements[0]) - token = elements[1] + identity, err = s.iam.GetVerifier(before) + token = after } if err != nil { @@ -490,7 +490,13 @@ func (s *server) 
findIdentityFromStreamKey(key string) (string, error) { } if ok, err := identity.VerifyServiceToken(token); !ok { - return "$anon", fmt.Errorf("invalid token: %w", err) + if err != nil { + err = fmt.Errorf("invalid token: %w", err) + } else { + err = fmt.Errorf("invalid token") + } + + return "$anon", err } return identity.Name(), nil diff --git a/srt/srt.go b/srt/srt.go index ad19b42f..78a69394 100644 --- a/srt/srt.go +++ b/srt/srt.go @@ -268,10 +268,11 @@ func (s *server) srtlogListener(ctx context.Context) { } } -func (s *server) log(handler, action, resource, message string, client net.Addr) { +func (s *server) log(who, handler, action, resource, message string, client net.Addr) { s.logger.Info().WithFields(log.Fields{ + "who": who, "handler": handler, - "status": action, + "action": action, "resource": resource, "client": client.String(), }).Log(message) @@ -282,14 +283,19 @@ func (s *server) handleConnect(req srt.ConnRequest) srt.ConnType { client := req.RemoteAddr() streamId := req.StreamId() + if req.Version() != 5 { + s.log("", "CONNECT", "INVALID", streamId, "unsupported version", client) + return srt.REJECT + } + si, err := url.ParseStreamId(streamId) if err != nil { - s.log("CONNECT", "INVALID", "", err.Error(), client) + s.log("", "CONNECT", "INVALID", streamId, err.Error(), client) return srt.REJECT } if len(si.Resource) == 0 { - s.log("CONNECT", "INVALID", "", "stream resource not provided", client) + s.log("", "CONNECT", "INVALID", streamId, "stream resource not provided", client) return srt.REJECT } @@ -298,23 +304,23 @@ func (s *server) handleConnect(req srt.ConnRequest) srt.ConnType { } else if si.Mode == "request" { mode = srt.SUBSCRIBE } else { - s.log("CONNECT", "INVALID", si.Resource, "invalid connection mode", client) + s.log("", "CONNECT", "INVALID", si.Resource, "invalid connection mode", client) return srt.REJECT } if len(s.passphrase) != 0 { if !req.IsEncrypted() { - s.log("CONNECT", "FORBIDDEN", si.Resource, "connection has to be 
encrypted", client) + s.log("", "CONNECT", "FORBIDDEN", si.Resource, "connection has to be encrypted", client) return srt.REJECT } if err := req.SetPassphrase(s.passphrase); err != nil { - s.log("CONNECT", "FORBIDDEN", si.Resource, err.Error(), client) + s.log("", "CONNECT", "FORBIDDEN", si.Resource, err.Error(), client) return srt.REJECT } } else { if req.IsEncrypted() { - s.log("CONNECT", "INVALID", si.Resource, "connection must not be encrypted", client) + s.log("", "CONNECT", "INVALID", si.Resource, "connection must not be encrypted", client) return srt.REJECT } } @@ -322,7 +328,7 @@ func (s *server) handleConnect(req srt.ConnRequest) srt.ConnType { identity, err := s.findIdentityFromToken(si.Token) if err != nil { s.logger.Debug().WithError(err).Log("invalid token") - s.log("CONNECT", "FORBIDDEN", si.Resource, "invalid token", client) + s.log(identity, "CONNECT", "FORBIDDEN", si.Resource, "invalid token", client) return srt.REJECT } @@ -334,7 +340,7 @@ func (s *server) handleConnect(req srt.ConnRequest) srt.ConnType { } if !s.iam.Enforce(identity, domain, resource, action) { - s.log("CONNECT", "FORBIDDEN", si.Resource, "access denied", client) + s.log(identity, "CONNECT", "FORBIDDEN", si.Resource, "access denied", client) return srt.REJECT } @@ -350,6 +356,7 @@ func (s *server) publish(conn srt.Conn, isProxy bool) error { client := conn.RemoteAddr() si, _ := url.ParseStreamId(streamId) + identity, _ := s.findIdentityFromToken(si.Token) // Look for the stream s.lock.Lock() @@ -363,15 +370,15 @@ func (s *server) publish(conn srt.Conn, isProxy bool) error { s.lock.Unlock() if ch == nil { - s.log("PUBLISH", "CONFLICT", si.Resource, "already publishing", client) + s.log(identity, "PUBLISH", "CONFLICT", si.Resource, "already publishing", client) conn.Close() return fmt.Errorf("already publishing this resource") } - s.log("PUBLISH", "START", si.Resource, "", client) + s.log(identity, "PUBLISH", "START", si.Resource, "", client) // Blocks until connection closes - 
ch.pubsub.Publish(conn) + err := ch.pubsub.Publish(conn) s.lock.Lock() delete(s.channels, si.Resource) @@ -379,7 +386,7 @@ func (s *server) publish(conn srt.Conn, isProxy bool) error { ch.Close() - s.log("PUBLISH", "STOP", si.Resource, "", client) + s.log(identity, "PUBLISH", "STOP", si.Resource, err.Error(), client) conn.Close() @@ -393,6 +400,7 @@ func (s *server) handleSubscribe(conn srt.Conn) { client := conn.RemoteAddr() si, _ := url.ParseStreamId(streamId) + identity, _ := s.findIdentityFromToken(si.Token) // Look for the stream locally s.lock.RLock() @@ -403,7 +411,7 @@ func (s *server) handleSubscribe(conn srt.Conn) { // Check in the cluster for the stream and proxy it srturl, err := s.proxy.GetURL("srt", si.Resource) if err != nil { - s.log("SUBSCRIBE", "NOTFOUND", si.Resource, "no publisher for this resource found", client) + s.log(identity, "PLAY", "NOTFOUND", si.Resource, "no publisher for this resource found", client) return } @@ -412,16 +420,16 @@ func (s *server) handleSubscribe(conn srt.Conn) { config := srt.DefaultConfig() config.StreamId = streamId config.Latency = 200 * time.Millisecond // This might be a value obtained from the cluster - host, err := config.UnmarshalURL(peerurl) + host, port, err := config.UnmarshalURL(peerurl) if err != nil { s.logger.Error().WithField("address", peerurl).WithError(err).Log("Parsing proxy address failed") - s.log("SUBSCRIBE", "NOTFOUND", si.Resource, "no publisher for this resource found", client) + s.log(identity, "PLAY", "NOTFOUND", si.Resource, "no publisher for this resource found", client) return } - src, err := srt.Dial("srt", host, config) + src, err := srt.Dial("srt", host+":"+port, config) if err != nil { s.logger.Error().WithField("address", peerurl).WithError(err).Log("Proxying address failed") - s.log("SUBSCRIBE", "NOTFOUND", si.Resource, "no publisher for this resource found", client) + s.log(identity, "PLAY", "NOTFOUND", si.Resource, "no publisher for this resource found", client) return } @@ 
-429,13 +437,13 @@ func (s *server) handleSubscribe(conn srt.Conn) { wg.Add(1) go func() { - s.log("SUBSCRIBE", "PROXYSTART", peerurl, "", client) + s.log(identity, "PLAY", "PROXYSTART", peerurl, "", client) wg.Done() err := s.publish(src, true) if err != nil { s.logger.Error().WithField("address", srturl).WithError(err).Log("Proxying address failed") } - s.log("SUBSCRIBE", "PROXYPUBLISHSTOP", peerurl, "", client) + s.log(identity, "PLAY", "PROXYSTOP", peerurl, "", client) }() // Wait for the goroutine to start @@ -463,14 +471,14 @@ func (s *server) handleSubscribe(conn srt.Conn) { } if ch != nil { - s.log("SUBSCRIBE", "START", si.Resource, "", client) + s.log(identity, "PLAY", "START", si.Resource, "", client) id := ch.AddSubscriber(conn, si.Resource) // Blocks until connection closes - ch.pubsub.Subscribe(conn) + err := ch.pubsub.Subscribe(conn) - s.log("SUBSCRIBE", "STOP", si.Resource, "", client) + s.log(identity, "PLAY", "STOP", si.Resource, err.Error(), client) ch.RemoveSubscriber(id) } @@ -486,13 +494,13 @@ func (s *server) findIdentityFromToken(key string) (string, error) { var token string - elements := strings.Split(key, ":") - if len(elements) == 1 { + before, after, found := strings.Cut(key, ":") + if !found { identity = s.iam.GetDefaultVerifier() - token = elements[0] + token = before } else { - identity, err = s.iam.GetVerifier(elements[0]) - token = elements[1] + identity, err = s.iam.GetVerifier(before) + token = after } if err != nil { @@ -500,7 +508,13 @@ func (s *server) findIdentityFromToken(key string) (string, error) { } if ok, err := identity.VerifyServiceToken(token); !ok { - return "$anon", fmt.Errorf("invalid token: %w", err) + if err != nil { + err = fmt.Errorf("invalid token: %w", err) + } else { + err = fmt.Errorf("invalid token") + } + + return "$anon", err } return identity.Name(), nil diff --git a/vendor/github.com/99designs/gqlgen/.golangci.yml b/vendor/github.com/99designs/gqlgen/.golangci.yml index 21099b69..5966bf2c 100644 --- 
a/vendor/github.com/99designs/gqlgen/.golangci.yml +++ b/vendor/github.com/99designs/gqlgen/.golangci.yml @@ -11,8 +11,6 @@ linters: disable-all: true enable: - bodyclose - - deadcode - - depguard - dupl - errcheck - gocritic @@ -25,11 +23,9 @@ linters: - nakedret - prealloc - staticcheck - - structcheck - typecheck - unconvert - unused - - varcheck issues: exclude-rules: diff --git a/vendor/github.com/99designs/gqlgen/CHANGELOG.md b/vendor/github.com/99designs/gqlgen/CHANGELOG.md index ee9cb3d8..73c93188 100644 --- a/vendor/github.com/99designs/gqlgen/CHANGELOG.md +++ b/vendor/github.com/99designs/gqlgen/CHANGELOG.md @@ -5,10 +5,186 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.30...HEAD) +## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.32...HEAD) + +## [v0.17.32](https://github.com/99designs/gqlgen/compare/v0.17.31...v0.17.32) - 2023-06-06 +- 3a81a78b release v0.17.32 + +- dbb61174 Added unit tests for defer (#2657) + +
[](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
+
[](https://travis-ci.org/go-playground/validator)
[](https://coveralls.io/github/go-playground/validator?branch=master)
[](https://goreportcard.com/report/github.com/go-playground/validator)
diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go
index 8e6b169c..e676f1d1 100644
--- a/vendor/github.com/go-playground/validator/v10/baked_in.go
+++ b/vendor/github.com/go-playground/validator/v10/baked_in.go
@@ -1414,25 +1414,21 @@ func isURL(fl FieldLevel) bool {
switch field.Kind() {
case reflect.String:
- var i int
s := field.String()
- // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
- // emulate browser and strip the '#' suffix prior to validation. see issue-#237
- if i = strings.Index(s, "#"); i > -1 {
- s = s[:i]
- }
-
if len(s) == 0 {
return false
}
- url, err := url.ParseRequestURI(s)
-
+ url, err := url.Parse(s)
if err != nil || url.Scheme == "" {
return false
}
+ if url.Host == "" && url.Fragment == "" && url.Opaque == "" {
+ return false
+ }
+
return true
}
@@ -1450,7 +1446,13 @@ func isHttpURL(fl FieldLevel) bool {
case reflect.String:
s := strings.ToLower(field.String())
- return strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://")
+
+ url, err := url.Parse(s)
+ if err != nil || url.Host == "" {
+ return false
+ }
+
+ return url.Scheme == "http" || url.Scheme == "https"
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
@@ -2568,9 +2570,17 @@ func isDirPath(fl FieldLevel) bool {
func isJSON(fl FieldLevel) bool {
field := fl.Field()
- if field.Kind() == reflect.String {
+ switch field.Kind() {
+ case reflect.String:
val := field.String()
return json.Valid([]byte(val))
+ case reflect.Slice:
+ fieldType := field.Type()
+
+ if fieldType.ConvertibleTo(byteSliceType) {
+ b := field.Convert(byteSliceType).Interface().([]byte)
+ return json.Valid(b)
+ }
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go
index d2ee8fe3..d9dbf0ce 100644
--- a/vendor/github.com/go-playground/validator/v10/validator_instance.go
+++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go
@@ -53,6 +53,8 @@ var (
timeDurationType = reflect.TypeOf(time.Duration(0))
timeType = reflect.TypeOf(time.Time{})
+ byteSliceType = reflect.TypeOf([]byte{})
+
defaultCField = &cField{namesEqual: true}
)
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/2q.go b/vendor/github.com/hashicorp/golang-lru/v2/2q.go
index 1f0055b7..5e00d9a7 100644
--- a/vendor/github.com/hashicorp/golang-lru/v2/2q.go
+++ b/vendor/github.com/hashicorp/golang-lru/v2/2q.go
@@ -181,6 +181,16 @@ func (c *TwoQueueCache[K, V]) Keys() []K {
return append(k1, k2...)
}
+// Values returns a slice of the values in the cache.
+// The frequently used values are first in the returned slice.
+func (c *TwoQueueCache[K, V]) Values() []V {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ v1 := c.frequent.Values()
+ v2 := c.recent.Values()
+ return append(v1, v2...)
+}
+
// Remove removes the provided key from the cache.
func (c *TwoQueueCache[K, V]) Remove(key K) {
c.lock.Lock()
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/README.md b/vendor/github.com/hashicorp/golang-lru/v2/README.md
index 4c08cc46..b60785fc 100644
--- a/vendor/github.com/hashicorp/golang-lru/v2/README.md
+++ b/vendor/github.com/hashicorp/golang-lru/v2/README.md
@@ -15,11 +15,20 @@ Example
Using the LRU is very simple:
```go
-l, _ := New[int, interface{}](128)
-for i := 0; i < 256; i++ {
- l.Add(i, nil)
-}
-if l.Len() != 128 {
- panic(fmt.Sprintf("bad len: %v", l.Len()))
+package main
+
+import (
+ "fmt"
+ "github.com/hashicorp/golang-lru/v2"
+)
+
+func main() {
+ l, _ := lru.New[int, any](128)
+ for i := 0; i < 256; i++ {
+ l.Add(i, nil)
+ }
+ if l.Len() != 128 {
+ panic(fmt.Sprintf("bad len: %v", l.Len()))
+ }
}
```
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/arc.go b/vendor/github.com/hashicorp/golang-lru/v2/arc.go
deleted file mode 100644
index a255aae2..00000000
--- a/vendor/github.com/hashicorp/golang-lru/v2/arc.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package lru
-
-import (
- "sync"
-
- "github.com/hashicorp/golang-lru/v2/simplelru"
-)
-
-// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
-// ARC is an enhancement over the standard LRU cache in that tracks both
-// frequency and recency of use. This avoids a burst in access to new
-// entries from evicting the frequently used older entries. It adds some
-// additional tracking overhead to a standard LRU cache, computationally
-// it is roughly 2x the cost, and the extra memory overhead is linear
-// with the size of the cache. ARC has been patented by IBM, but is
-// similar to the TwoQueueCache (2Q) which requires setting parameters.
-type ARCCache[K comparable, V any] struct {
- size int // Size is the total capacity of the cache
- p int // P is the dynamic preference towards T1 or T2
-
- t1 simplelru.LRUCache[K, V] // T1 is the LRU for recently accessed items
- b1 simplelru.LRUCache[K, struct{}] // B1 is the LRU for evictions from t1
-
- t2 simplelru.LRUCache[K, V] // T2 is the LRU for frequently accessed items
- b2 simplelru.LRUCache[K, struct{}] // B2 is the LRU for evictions from t2
-
- lock sync.RWMutex
-}
-
-// NewARC creates an ARC of the given size
-func NewARC[K comparable, V any](size int) (*ARCCache[K, V], error) {
- // Create the sub LRUs
- b1, err := simplelru.NewLRU[K, struct{}](size, nil)
- if err != nil {
- return nil, err
- }
- b2, err := simplelru.NewLRU[K, struct{}](size, nil)
- if err != nil {
- return nil, err
- }
- t1, err := simplelru.NewLRU[K, V](size, nil)
- if err != nil {
- return nil, err
- }
- t2, err := simplelru.NewLRU[K, V](size, nil)
- if err != nil {
- return nil, err
- }
-
- // Initialize the ARC
- c := &ARCCache[K, V]{
- size: size,
- p: 0,
- t1: t1,
- b1: b1,
- t2: t2,
- b2: b2,
- }
- return c, nil
-}
-
-// Get looks up a key's value from the cache.
-func (c *ARCCache[K, V]) Get(key K) (value V, ok bool) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // If the value is contained in T1 (recent), then
- // promote it to T2 (frequent)
- if val, ok := c.t1.Peek(key); ok {
- c.t1.Remove(key)
- c.t2.Add(key, val)
- return val, ok
- }
-
- // Check if the value is contained in T2 (frequent)
- if val, ok := c.t2.Get(key); ok {
- return val, ok
- }
-
- // No hit
- return
-}
-
-// Add adds a value to the cache.
-func (c *ARCCache[K, V]) Add(key K, value V) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // Check if the value is contained in T1 (recent), and potentially
- // promote it to frequent T2
- if c.t1.Contains(key) {
- c.t1.Remove(key)
- c.t2.Add(key, value)
- return
- }
-
- // Check if the value is already in T2 (frequent) and update it
- if c.t2.Contains(key) {
- c.t2.Add(key, value)
- return
- }
-
- // Check if this value was recently evicted as part of the
- // recently used list
- if c.b1.Contains(key) {
- // T1 set is too small, increase P appropriately
- delta := 1
- b1Len := c.b1.Len()
- b2Len := c.b2.Len()
- if b2Len > b1Len {
- delta = b2Len / b1Len
- }
- if c.p+delta >= c.size {
- c.p = c.size
- } else {
- c.p += delta
- }
-
- // Potentially need to make room in the cache
- if c.t1.Len()+c.t2.Len() >= c.size {
- c.replace(false)
- }
-
- // Remove from B1
- c.b1.Remove(key)
-
- // Add the key to the frequently used list
- c.t2.Add(key, value)
- return
- }
-
- // Check if this value was recently evicted as part of the
- // frequently used list
- if c.b2.Contains(key) {
- // T2 set is too small, decrease P appropriately
- delta := 1
- b1Len := c.b1.Len()
- b2Len := c.b2.Len()
- if b1Len > b2Len {
- delta = b1Len / b2Len
- }
- if delta >= c.p {
- c.p = 0
- } else {
- c.p -= delta
- }
-
- // Potentially need to make room in the cache
- if c.t1.Len()+c.t2.Len() >= c.size {
- c.replace(true)
- }
-
- // Remove from B2
- c.b2.Remove(key)
-
- // Add the key to the frequently used list
- c.t2.Add(key, value)
- return
- }
-
- // Potentially need to make room in the cache
- if c.t1.Len()+c.t2.Len() >= c.size {
- c.replace(false)
- }
-
- // Keep the size of the ghost buffers trim
- if c.b1.Len() > c.size-c.p {
- c.b1.RemoveOldest()
- }
- if c.b2.Len() > c.p {
- c.b2.RemoveOldest()
- }
-
- // Add to the recently seen list
- c.t1.Add(key, value)
-}
-
-// replace is used to adaptively evict from either T1 or T2
-// based on the current learned value of P
-func (c *ARCCache[K, V]) replace(b2ContainsKey bool) {
- t1Len := c.t1.Len()
- if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
- k, _, ok := c.t1.RemoveOldest()
- if ok {
- c.b1.Add(k, struct{}{})
- }
- } else {
- k, _, ok := c.t2.RemoveOldest()
- if ok {
- c.b2.Add(k, struct{}{})
- }
- }
-}
-
-// Len returns the number of cached entries
-func (c *ARCCache[K, V]) Len() int {
- c.lock.RLock()
- defer c.lock.RUnlock()
- return c.t1.Len() + c.t2.Len()
-}
-
-// Keys returns all the cached keys
-func (c *ARCCache[K, V]) Keys() []K {
- c.lock.RLock()
- defer c.lock.RUnlock()
- k1 := c.t1.Keys()
- k2 := c.t2.Keys()
- return append(k1, k2...)
-}
-
-// Remove is used to purge a key from the cache
-func (c *ARCCache[K, V]) Remove(key K) {
- c.lock.Lock()
- defer c.lock.Unlock()
- if c.t1.Remove(key) {
- return
- }
- if c.t2.Remove(key) {
- return
- }
- if c.b1.Remove(key) {
- return
- }
- if c.b2.Remove(key) {
- return
- }
-}
-
-// Purge is used to clear the cache
-func (c *ARCCache[K, V]) Purge() {
- c.lock.Lock()
- defer c.lock.Unlock()
- c.t1.Purge()
- c.t2.Purge()
- c.b1.Purge()
- c.b2.Purge()
-}
-
-// Contains is used to check if the cache contains a key
-// without updating recency or frequency.
-func (c *ARCCache[K, V]) Contains(key K) bool {
- c.lock.RLock()
- defer c.lock.RUnlock()
- return c.t1.Contains(key) || c.t2.Contains(key)
-}
-
-// Peek is used to inspect the cache value of a key
-// without updating recency or frequency.
-func (c *ARCCache[K, V]) Peek(key K) (value V, ok bool) {
- c.lock.RLock()
- defer c.lock.RUnlock()
- if val, ok := c.t1.Peek(key); ok {
- return val, ok
- }
- return c.t2.Peek(key)
-}
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/doc.go b/vendor/github.com/hashicorp/golang-lru/v2/doc.go
index 2474929f..24107ee0 100644
--- a/vendor/github.com/hashicorp/golang-lru/v2/doc.go
+++ b/vendor/github.com/hashicorp/golang-lru/v2/doc.go
@@ -3,21 +3,21 @@
// Package lru provides three different LRU caches of varying sophistication.
//
-// Cache is a simple LRU cache. It is based on the
-// LRU implementation in groupcache:
-// https://github.com/golang/groupcache/tree/master/lru
+// Cache is a simple LRU cache. It is based on the LRU implementation in
+// groupcache: https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
-// This avoids a burst of accesses from taking out frequently used entries,
-// at the cost of about 2x computational overhead and some extra bookkeeping.
+// This avoids a burst of accesses from taking out frequently used entries, at
+// the cost of about 2x computational overhead and some extra bookkeeping.
//
-// ARCCache is an adaptive replacement cache. It tracks recent evictions as
-// well as recent usage in both the frequent and recent caches. Its
-// computational overhead is comparable to TwoQueueCache, but the memory
-// overhead is linear with the size of the cache.
+// ARCCache is an adaptive replacement cache. It tracks recent evictions as well
+// as recent usage in both the frequent and recent caches. Its computational
+// overhead is comparable to TwoQueueCache, but the memory overhead is linear
+// with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
-// your program.
+// your program. For this reason, it is in a separate go module contained within
+// this repository.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/lru.go
index 32d04170..a2655f1f 100644
--- a/vendor/github.com/hashicorp/golang-lru/v2/lru.go
+++ b/vendor/github.com/hashicorp/golang-lru/v2/lru.go
@@ -233,6 +233,14 @@ func (c *Cache[K, V]) Keys() []K {
return keys
}
+// Values returns a slice of the values in the cache, from oldest to newest.
+func (c *Cache[K, V]) Values() []V {
+ c.lock.RLock()
+ values := c.lru.Values()
+ c.lock.RUnlock()
+ return values
+}
+
// Len returns the number of items in the cache.
func (c *Cache[K, V]) Len() int {
c.lock.RLock()
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go
index b6731afd..ac256645 100644
--- a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go
+++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go
@@ -129,6 +129,17 @@ func (c *LRU[K, V]) Keys() []K {
return keys
}
+// Values returns a slice of the values in the cache, from oldest to newest.
+func (c *LRU[K, V]) Values() []V {
+ values := make([]V, len(c.items))
+ i := 0
+ for ent := c.evictList.back(); ent != nil; ent = ent.prevEntry() {
+ values[i] = ent.value
+ i++
+ }
+ return values
+}
+
// Len returns the number of items in the cache.
func (c *LRU[K, V]) Len() int {
return c.evictList.length()
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
index 3cbf02bc..043b8bcc 100644
--- a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
+++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
@@ -32,6 +32,9 @@ type LRUCache[K comparable, V any] interface {
// Returns a slice of the keys in the cache, from oldest to newest.
Keys() []K
+ // Values returns a slice of the values in the cache, from oldest to newest.
+ Values() []V
+
// Returns the number of items in the cache.
Len() int
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/testing.go b/vendor/github.com/hashicorp/golang-lru/v2/testing.go
deleted file mode 100644
index 3277c218..00000000
--- a/vendor/github.com/hashicorp/golang-lru/v2/testing.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package lru
-
-import (
- "crypto/rand"
- "math"
- "math/big"
- "testing"
-)
-
-func getRand(tb testing.TB) int64 {
- out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
- if err != nil {
- tb.Fatal(err)
- }
- return out.Int64()
-}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go
index 11657f09..5e57995d 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_all.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_all.go
@@ -742,7 +742,6 @@ searchDict:
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>8, tableBits)
- candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s - 1)
cv = load64(src, s)
diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go
index f46adb41..544cb1e1 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_better.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_better.go
@@ -157,7 +157,6 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
index0 := base + 1
index1 := s - 2
- cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
@@ -269,18 +268,21 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
lTable[hash7(cv0, lTableBits)] = uint32(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+ // lTable could be postponed, but very minor difference.
lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
index0 += 1
index1 -= 1
cv = load64(src, s)
- // index every second long in between.
- for index0 < index1 {
+ // Index large values sparsely in between.
+ // We do two starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
- lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
- index1 -= 2
+ index2 += 2
}
}
@@ -459,12 +461,14 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
index1 -= 1
cv = load64(src, s)
- // index every second long in between.
- for index0 < index1 {
+ // Index large values sparsely in between.
+ // We do two starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
- lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
- index1 -= 2
+ index2 += 2
}
}
@@ -599,7 +603,6 @@ searchDict:
if s >= sLimit {
break searchDict
}
- cv = load64(src, s)
// Index in-between
index0 := base + 1
index1 := s - 2
@@ -865,12 +868,14 @@ searchDict:
index1 -= 1
cv = load64(src, s)
- // index every second long in between.
- for index0 < index1 {
+ // Index large values sparsely in between.
+ // We do two starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
- lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
- index1 -= 2
+ index2 += 2
}
}
@@ -961,7 +966,6 @@ searchDict:
index0 := base + 1
index1 := s - 2
- cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
@@ -1079,12 +1083,14 @@ searchDict:
index1 -= 1
cv = load64(src, s)
- // index every second long in between.
- for index0 < index1 {
+ // Index large values sparsely in between.
+ // We do two starting from different offsets for speed.
+ index2 := (index0 + index1 + 1) >> 1
+ for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
- lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
- index1 -= 2
+ index2 += 2
}
}
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
index 9222d179..54031aa3 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -274,7 +274,6 @@ matchlen_loop_repeat_extend_encodeBlockAsm:
LEAL 8(R11), R11
CMPL R8, $0x08
JAE matchlen_loopback_repeat_extend_encodeBlockAsm
- JZ repeat_extend_forward_end_encodeBlockAsm
matchlen_match4_repeat_extend_encodeBlockAsm:
CMPL R8, $0x04
@@ -282,21 +281,21 @@ matchlen_match4_repeat_extend_encodeBlockAsm:
MOVL (R9)(R11*1), R10
CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm
- SUBL $0x04, R8
+ LEAL -4(R8), R8
LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x02
- JB matchlen_match1_repeat_extend_encodeBlockAsm
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm
+ JB repeat_extend_forward_end_encodeBlockAsm
MOVW (R9)(R11*1), R10
CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm
- SUBL $0x02, R8
LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm
matchlen_match1_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x01
- JB repeat_extend_forward_end_encodeBlockAsm
MOVB (R9)(R11*1), R10
CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm
@@ -877,7 +876,6 @@ matchlen_loop_match_nolit_encodeBlockAsm:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeBlockAsm
- JZ match_nolit_end_encodeBlockAsm
matchlen_match4_match_nolit_encodeBlockAsm:
CMPL SI, $0x04
@@ -885,21 +883,21 @@ matchlen_match4_match_nolit_encodeBlockAsm:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeBlockAsm
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm
+ JB match_nolit_end_encodeBlockAsm
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm
matchlen_match1_match_nolit_encodeBlockAsm:
- CMPL SI, $0x01
- JB match_nolit_end_encodeBlockAsm
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm
@@ -1637,7 +1635,6 @@ matchlen_loop_repeat_extend_encodeBlockAsm4MB:
LEAL 8(R11), R11
CMPL R8, $0x08
JAE matchlen_loopback_repeat_extend_encodeBlockAsm4MB
- JZ repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_match4_repeat_extend_encodeBlockAsm4MB:
CMPL R8, $0x04
@@ -1645,21 +1642,21 @@ matchlen_match4_repeat_extend_encodeBlockAsm4MB:
MOVL (R9)(R11*1), R10
CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB
- SUBL $0x04, R8
+ LEAL -4(R8), R8
LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x02
- JB matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ JB repeat_extend_forward_end_encodeBlockAsm4MB
MOVW (R9)(R11*1), R10
CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB
- SUBL $0x02, R8
LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_match1_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x01
- JB repeat_extend_forward_end_encodeBlockAsm4MB
MOVB (R9)(R11*1), R10
CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm4MB
@@ -2190,7 +2187,6 @@ matchlen_loop_match_nolit_encodeBlockAsm4MB:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeBlockAsm4MB
- JZ match_nolit_end_encodeBlockAsm4MB
matchlen_match4_match_nolit_encodeBlockAsm4MB:
CMPL SI, $0x04
@@ -2198,21 +2194,21 @@ matchlen_match4_match_nolit_encodeBlockAsm4MB:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm4MB
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeBlockAsm4MB
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm4MB
+ JB match_nolit_end_encodeBlockAsm4MB
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm4MB
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm4MB
matchlen_match1_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x01
- JB match_nolit_end_encodeBlockAsm4MB
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm4MB
@@ -2902,7 +2898,6 @@ matchlen_loop_repeat_extend_encodeBlockAsm12B:
LEAL 8(R11), R11
CMPL R8, $0x08
JAE matchlen_loopback_repeat_extend_encodeBlockAsm12B
- JZ repeat_extend_forward_end_encodeBlockAsm12B
matchlen_match4_repeat_extend_encodeBlockAsm12B:
CMPL R8, $0x04
@@ -2910,21 +2905,21 @@ matchlen_match4_repeat_extend_encodeBlockAsm12B:
MOVL (R9)(R11*1), R10
CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm12B
- SUBL $0x04, R8
+ LEAL -4(R8), R8
LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x02
- JB matchlen_match1_repeat_extend_encodeBlockAsm12B
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm12B
+ JB repeat_extend_forward_end_encodeBlockAsm12B
MOVW (R9)(R11*1), R10
CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm12B
- SUBL $0x02, R8
LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm12B
matchlen_match1_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x01
- JB repeat_extend_forward_end_encodeBlockAsm12B
MOVB (R9)(R11*1), R10
CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm12B
@@ -3333,7 +3328,6 @@ matchlen_loop_match_nolit_encodeBlockAsm12B:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeBlockAsm12B
- JZ match_nolit_end_encodeBlockAsm12B
matchlen_match4_match_nolit_encodeBlockAsm12B:
CMPL SI, $0x04
@@ -3341,21 +3335,21 @@ matchlen_match4_match_nolit_encodeBlockAsm12B:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm12B
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeBlockAsm12B
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm12B
+ JB match_nolit_end_encodeBlockAsm12B
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm12B
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm12B
matchlen_match1_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x01
- JB match_nolit_end_encodeBlockAsm12B
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm12B
@@ -3935,7 +3929,6 @@ matchlen_loop_repeat_extend_encodeBlockAsm10B:
LEAL 8(R11), R11
CMPL R8, $0x08
JAE matchlen_loopback_repeat_extend_encodeBlockAsm10B
- JZ repeat_extend_forward_end_encodeBlockAsm10B
matchlen_match4_repeat_extend_encodeBlockAsm10B:
CMPL R8, $0x04
@@ -3943,21 +3936,21 @@ matchlen_match4_repeat_extend_encodeBlockAsm10B:
MOVL (R9)(R11*1), R10
CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm10B
- SUBL $0x04, R8
+ LEAL -4(R8), R8
LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x02
- JB matchlen_match1_repeat_extend_encodeBlockAsm10B
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm10B
+ JB repeat_extend_forward_end_encodeBlockAsm10B
MOVW (R9)(R11*1), R10
CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm10B
- SUBL $0x02, R8
LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm10B
matchlen_match1_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x01
- JB repeat_extend_forward_end_encodeBlockAsm10B
MOVB (R9)(R11*1), R10
CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm10B
@@ -4366,7 +4359,6 @@ matchlen_loop_match_nolit_encodeBlockAsm10B:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeBlockAsm10B
- JZ match_nolit_end_encodeBlockAsm10B
matchlen_match4_match_nolit_encodeBlockAsm10B:
CMPL SI, $0x04
@@ -4374,21 +4366,21 @@ matchlen_match4_match_nolit_encodeBlockAsm10B:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm10B
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeBlockAsm10B
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm10B
+ JB match_nolit_end_encodeBlockAsm10B
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm10B
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm10B
matchlen_match1_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x01
- JB match_nolit_end_encodeBlockAsm10B
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm10B
@@ -4968,7 +4960,6 @@ matchlen_loop_repeat_extend_encodeBlockAsm8B:
LEAL 8(R11), R11
CMPL R8, $0x08
JAE matchlen_loopback_repeat_extend_encodeBlockAsm8B
- JZ repeat_extend_forward_end_encodeBlockAsm8B
matchlen_match4_repeat_extend_encodeBlockAsm8B:
CMPL R8, $0x04
@@ -4976,21 +4967,21 @@ matchlen_match4_repeat_extend_encodeBlockAsm8B:
MOVL (R9)(R11*1), R10
CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm8B
- SUBL $0x04, R8
+ LEAL -4(R8), R8
LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x02
- JB matchlen_match1_repeat_extend_encodeBlockAsm8B
+ CMPL R8, $0x01
+ JE matchlen_match1_repeat_extend_encodeBlockAsm8B
+ JB repeat_extend_forward_end_encodeBlockAsm8B
MOVW (R9)(R11*1), R10
CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm8B
- SUBL $0x02, R8
LEAL 2(R11), R11
+ SUBL $0x02, R8
+ JZ repeat_extend_forward_end_encodeBlockAsm8B
matchlen_match1_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x01
- JB repeat_extend_forward_end_encodeBlockAsm8B
MOVB (R9)(R11*1), R10
CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm8B
@@ -5385,7 +5376,6 @@ matchlen_loop_match_nolit_encodeBlockAsm8B:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeBlockAsm8B
- JZ match_nolit_end_encodeBlockAsm8B
matchlen_match4_match_nolit_encodeBlockAsm8B:
CMPL SI, $0x04
@@ -5393,21 +5383,21 @@ matchlen_match4_match_nolit_encodeBlockAsm8B:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm8B
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeBlockAsm8B
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm8B
+ JB match_nolit_end_encodeBlockAsm8B
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm8B
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeBlockAsm8B
matchlen_match1_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x01
- JB match_nolit_end_encodeBlockAsm8B
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm8B
@@ -5889,7 +5879,6 @@ matchlen_loop_match_nolit_encodeBetterBlockAsm:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm
- JZ match_nolit_end_encodeBetterBlockAsm
matchlen_match4_match_nolit_encodeBetterBlockAsm:
CMPL DI, $0x04
@@ -5897,21 +5886,21 @@ matchlen_match4_match_nolit_encodeBetterBlockAsm:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeBetterBlockAsm
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm
+ JB match_nolit_end_encodeBetterBlockAsm
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm
matchlen_match1_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x01
- JB match_nolit_end_encodeBetterBlockAsm
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm
@@ -6607,24 +6596,26 @@ match_nolit_dst_ok_encodeBetterBlockAsm:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 524312(SP)(R10*4)
MOVL R13, 524312(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeBetterBlockAsm
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x08, DI
- IMULQ BX, DI
- SHRQ $0x2f, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x08, R9
IMULQ BX, R9
SHRQ $0x2f, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x2f, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeBetterBlockAsm
emit_remainder_encodeBetterBlockAsm:
@@ -6960,7 +6951,6 @@ matchlen_loop_match_nolit_encodeBetterBlockAsm4MB:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB
- JZ match_nolit_end_encodeBetterBlockAsm4MB
matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
CMPL DI, $0x04
@@ -6968,21 +6958,21 @@ matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ JB match_nolit_end_encodeBetterBlockAsm4MB
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm4MB
matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x01
- JB match_nolit_end_encodeBetterBlockAsm4MB
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm4MB
@@ -7620,24 +7610,26 @@ match_nolit_dst_ok_encodeBetterBlockAsm4MB:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 524312(SP)(R10*4)
MOVL R13, 524312(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm4MB:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeBetterBlockAsm4MB
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x08, DI
- IMULQ BX, DI
- SHRQ $0x2f, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x08, R9
IMULQ BX, R9
SHRQ $0x2f, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x2f, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeBetterBlockAsm4MB
emit_remainder_encodeBetterBlockAsm4MB:
@@ -7957,7 +7949,6 @@ matchlen_loop_match_nolit_encodeBetterBlockAsm12B:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm12B
- JZ match_nolit_end_encodeBetterBlockAsm12B
matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
CMPL DI, $0x04
@@ -7965,21 +7956,21 @@ matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ JB match_nolit_end_encodeBetterBlockAsm12B
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm12B
matchlen_match1_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x01
- JB match_nolit_end_encodeBetterBlockAsm12B
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm12B
@@ -8478,24 +8469,26 @@ match_nolit_dst_ok_encodeBetterBlockAsm12B:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 65560(SP)(R10*4)
MOVL R13, 65560(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm12B:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeBetterBlockAsm12B
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x10, DI
- IMULQ BX, DI
- SHRQ $0x32, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x10, R9
IMULQ BX, R9
SHRQ $0x32, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeBetterBlockAsm12B
emit_remainder_encodeBetterBlockAsm12B:
@@ -8807,7 +8800,6 @@ matchlen_loop_match_nolit_encodeBetterBlockAsm10B:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm10B
- JZ match_nolit_end_encodeBetterBlockAsm10B
matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
CMPL DI, $0x04
@@ -8815,21 +8807,21 @@ matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ JB match_nolit_end_encodeBetterBlockAsm10B
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm10B
matchlen_match1_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x01
- JB match_nolit_end_encodeBetterBlockAsm10B
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm10B
@@ -9328,24 +9320,26 @@ match_nolit_dst_ok_encodeBetterBlockAsm10B:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 16408(SP)(R10*4)
MOVL R13, 16408(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm10B:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeBetterBlockAsm10B
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x10, DI
- IMULQ BX, DI
- SHRQ $0x34, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x10, R9
IMULQ BX, R9
SHRQ $0x34, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeBetterBlockAsm10B
emit_remainder_encodeBetterBlockAsm10B:
@@ -9657,7 +9651,6 @@ matchlen_loop_match_nolit_encodeBetterBlockAsm8B:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm8B
- JZ match_nolit_end_encodeBetterBlockAsm8B
matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
CMPL DI, $0x04
@@ -9665,21 +9658,21 @@ matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ JB match_nolit_end_encodeBetterBlockAsm8B
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBetterBlockAsm8B
matchlen_match1_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x01
- JB match_nolit_end_encodeBetterBlockAsm8B
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm8B
@@ -10164,24 +10157,26 @@ match_nolit_dst_ok_encodeBetterBlockAsm8B:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 4120(SP)(R10*4)
MOVL R13, 4120(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm8B:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeBetterBlockAsm8B
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x10, DI
- IMULQ BX, DI
- SHRQ $0x36, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x10, R9
IMULQ BX, R9
SHRQ $0x36, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeBetterBlockAsm8B
emit_remainder_encodeBetterBlockAsm8B:
@@ -10605,7 +10600,6 @@ matchlen_loop_repeat_extend_encodeSnappyBlockAsm:
LEAL 8(R10), R10
CMPL DI, $0x08
JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
CMPL DI, $0x04
@@ -10613,21 +10607,21 @@ matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
MOVL (R8)(R10*1), R9
CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x02
- JB matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm
MOVW (R8)(R10*1), R9
CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
- SUBL $0x02, DI
LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_match1_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x01
- JB repeat_extend_forward_end_encodeSnappyBlockAsm
MOVB (R8)(R10*1), R9
CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm
@@ -10928,7 +10922,6 @@ matchlen_loop_match_nolit_encodeSnappyBlockAsm:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm
- JZ match_nolit_end_encodeSnappyBlockAsm
matchlen_match4_match_nolit_encodeSnappyBlockAsm:
CMPL SI, $0x04
@@ -10936,21 +10929,21 @@ matchlen_match4_match_nolit_encodeSnappyBlockAsm:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ JB match_nolit_end_encodeSnappyBlockAsm
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm
matchlen_match1_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x01
- JB match_nolit_end_encodeSnappyBlockAsm
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm
@@ -11469,7 +11462,6 @@ matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K:
LEAL 8(R10), R10
CMPL DI, $0x08
JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
CMPL DI, $0x04
@@ -11477,21 +11469,21 @@ matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
MOVL (R8)(R10*1), R9
CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x02
- JB matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm64K
MOVW (R8)(R10*1), R9
CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
- SUBL $0x02, DI
LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x01
- JB repeat_extend_forward_end_encodeSnappyBlockAsm64K
MOVB (R8)(R10*1), R9
CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K
@@ -11752,7 +11744,6 @@ matchlen_loop_match_nolit_encodeSnappyBlockAsm64K:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K
- JZ match_nolit_end_encodeSnappyBlockAsm64K
matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
CMPL SI, $0x04
@@ -11760,21 +11751,21 @@ matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ JB match_nolit_end_encodeSnappyBlockAsm64K
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm64K
matchlen_match1_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x01
- JB match_nolit_end_encodeSnappyBlockAsm64K
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm64K
@@ -12253,7 +12244,6 @@ matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B:
LEAL 8(R10), R10
CMPL DI, $0x08
JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
CMPL DI, $0x04
@@ -12261,21 +12251,21 @@ matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
MOVL (R8)(R10*1), R9
CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x02
- JB matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm12B
MOVW (R8)(R10*1), R9
CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
- SUBL $0x02, DI
LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x01
- JB repeat_extend_forward_end_encodeSnappyBlockAsm12B
MOVB (R8)(R10*1), R9
CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
@@ -12536,7 +12526,6 @@ matchlen_loop_match_nolit_encodeSnappyBlockAsm12B:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B
- JZ match_nolit_end_encodeSnappyBlockAsm12B
matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
CMPL SI, $0x04
@@ -12544,21 +12533,21 @@ matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ JB match_nolit_end_encodeSnappyBlockAsm12B
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm12B
matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x01
- JB match_nolit_end_encodeSnappyBlockAsm12B
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm12B
@@ -13037,7 +13026,6 @@ matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B:
LEAL 8(R10), R10
CMPL DI, $0x08
JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
CMPL DI, $0x04
@@ -13045,21 +13033,21 @@ matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
MOVL (R8)(R10*1), R9
CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x02
- JB matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm10B
MOVW (R8)(R10*1), R9
CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
- SUBL $0x02, DI
LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x01
- JB repeat_extend_forward_end_encodeSnappyBlockAsm10B
MOVB (R8)(R10*1), R9
CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
@@ -13320,7 +13308,6 @@ matchlen_loop_match_nolit_encodeSnappyBlockAsm10B:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B
- JZ match_nolit_end_encodeSnappyBlockAsm10B
matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
CMPL SI, $0x04
@@ -13328,21 +13315,21 @@ matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ JB match_nolit_end_encodeSnappyBlockAsm10B
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm10B
matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x01
- JB match_nolit_end_encodeSnappyBlockAsm10B
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm10B
@@ -13821,7 +13808,6 @@ matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B:
LEAL 8(R10), R10
CMPL DI, $0x08
JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
CMPL DI, $0x04
@@ -13829,21 +13815,21 @@ matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
MOVL (R8)(R10*1), R9
CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x02
- JB matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ JB repeat_extend_forward_end_encodeSnappyBlockAsm8B
MOVW (R8)(R10*1), R9
CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
- SUBL $0x02, DI
LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x01
- JB repeat_extend_forward_end_encodeSnappyBlockAsm8B
MOVB (R8)(R10*1), R9
CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
@@ -14102,7 +14088,6 @@ matchlen_loop_match_nolit_encodeSnappyBlockAsm8B:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B
- JZ match_nolit_end_encodeSnappyBlockAsm8B
matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
CMPL SI, $0x04
@@ -14110,21 +14095,21 @@ matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ JB match_nolit_end_encodeSnappyBlockAsm8B
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_encodeSnappyBlockAsm8B
matchlen_match1_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x01
- JB match_nolit_end_encodeSnappyBlockAsm8B
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm8B
@@ -14513,7 +14498,6 @@ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm
- JZ match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
CMPL DI, $0x04
@@ -14521,21 +14505,21 @@ matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ JB match_nolit_end_encodeSnappyBetterBlockAsm
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x01
- JB match_nolit_end_encodeSnappyBetterBlockAsm
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm
@@ -14790,24 +14774,26 @@ match_nolit_dst_ok_encodeSnappyBetterBlockAsm:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 524312(SP)(R10*4)
MOVL R13, 524312(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeSnappyBetterBlockAsm
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x08, DI
- IMULQ BX, DI
- SHRQ $0x2f, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x08, R9
IMULQ BX, R9
SHRQ $0x2f, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x2f, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeSnappyBetterBlockAsm
emit_remainder_encodeSnappyBetterBlockAsm:
@@ -15135,7 +15121,6 @@ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K
- JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
CMPL DI, $0x04
@@ -15143,21 +15128,21 @@ matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ JB match_nolit_end_encodeSnappyBetterBlockAsm64K
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x01
- JB match_nolit_end_encodeSnappyBetterBlockAsm64K
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm64K
@@ -15363,24 +15348,26 @@ match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 262168(SP)(R10*4)
MOVL R13, 262168(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm64K:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeSnappyBetterBlockAsm64K
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x08, DI
- IMULQ BX, DI
- SHRQ $0x30, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x08, R9
IMULQ BX, R9
SHRQ $0x30, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x08, R10
+ IMULQ BX, R10
+ SHRQ $0x30, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeSnappyBetterBlockAsm64K
emit_remainder_encodeSnappyBetterBlockAsm64K:
@@ -15692,7 +15679,6 @@ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B
- JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
CMPL DI, $0x04
@@ -15700,21 +15686,21 @@ matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ JB match_nolit_end_encodeSnappyBetterBlockAsm12B
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x01
- JB match_nolit_end_encodeSnappyBetterBlockAsm12B
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm12B
@@ -15920,24 +15906,26 @@ match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 65560(SP)(R10*4)
MOVL R13, 65560(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm12B:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeSnappyBetterBlockAsm12B
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x10, DI
- IMULQ BX, DI
- SHRQ $0x32, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x10, R9
IMULQ BX, R9
SHRQ $0x32, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeSnappyBetterBlockAsm12B
emit_remainder_encodeSnappyBetterBlockAsm12B:
@@ -16249,7 +16237,6 @@ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B
- JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
CMPL DI, $0x04
@@ -16257,21 +16244,21 @@ matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ JB match_nolit_end_encodeSnappyBetterBlockAsm10B
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x01
- JB match_nolit_end_encodeSnappyBetterBlockAsm10B
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm10B
@@ -16477,24 +16464,26 @@ match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 16408(SP)(R10*4)
MOVL R13, 16408(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm10B:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeSnappyBetterBlockAsm10B
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x10, DI
- IMULQ BX, DI
- SHRQ $0x34, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x10, R9
IMULQ BX, R9
SHRQ $0x34, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeSnappyBetterBlockAsm10B
emit_remainder_encodeSnappyBetterBlockAsm10B:
@@ -16806,7 +16795,6 @@ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B:
LEAL 8(R11), R11
CMPL DI, $0x08
JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B
- JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
CMPL DI, $0x04
@@ -16814,21 +16802,21 @@ matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
MOVL (R8)(R11*1), R10
CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x02
- JB matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ JB match_nolit_end_encodeSnappyBetterBlockAsm8B
MOVW (R8)(R11*1), R10
CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
- SUBL $0x02, DI
LEAL 2(R11), R11
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x01
- JB match_nolit_end_encodeSnappyBetterBlockAsm8B
MOVB (R8)(R11*1), R10
CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm8B
@@ -17032,24 +17020,26 @@ match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B:
MOVL R8, 24(SP)(R11*4)
MOVL DI, 4120(SP)(R10*4)
MOVL R13, 4120(SP)(R12*4)
+ LEAQ 1(R8)(SI*1), DI
+ SHRQ $0x01, DI
ADDQ $0x01, SI
SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm8B:
- CMPQ SI, R8
+ CMPQ DI, R8
JAE search_loop_encodeSnappyBetterBlockAsm8B
- MOVQ (DX)(SI*1), DI
- MOVQ (DX)(R8*1), R9
- SHLQ $0x10, DI
- IMULQ BX, DI
- SHRQ $0x36, DI
+ MOVQ (DX)(SI*1), R9
+ MOVQ (DX)(DI*1), R10
SHLQ $0x10, R9
IMULQ BX, R9
SHRQ $0x36, R9
- MOVL SI, 24(SP)(DI*4)
- MOVL R8, 24(SP)(R9*4)
+ SHLQ $0x10, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL SI, 24(SP)(R9*4)
+ MOVL DI, 24(SP)(R10*4)
ADDQ $0x02, SI
- SUBQ $0x02, R8
+ ADDQ $0x02, DI
JMP index_loop_encodeSnappyBetterBlockAsm8B
emit_remainder_encodeSnappyBetterBlockAsm8B:
@@ -17378,7 +17368,6 @@ matchlen_loop_repeat_extend_calcBlockSize:
LEAL 8(R10), R10
CMPL DI, $0x08
JAE matchlen_loopback_repeat_extend_calcBlockSize
- JZ repeat_extend_forward_end_calcBlockSize
matchlen_match4_repeat_extend_calcBlockSize:
CMPL DI, $0x04
@@ -17386,21 +17375,21 @@ matchlen_match4_repeat_extend_calcBlockSize:
MOVL (R8)(R10*1), R9
CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_calcBlockSize
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R10), R10
matchlen_match2_repeat_extend_calcBlockSize:
- CMPL DI, $0x02
- JB matchlen_match1_repeat_extend_calcBlockSize
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_calcBlockSize
+ JB repeat_extend_forward_end_calcBlockSize
MOVW (R8)(R10*1), R9
CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_calcBlockSize
- SUBL $0x02, DI
LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_calcBlockSize
matchlen_match1_repeat_extend_calcBlockSize:
- CMPL DI, $0x01
- JB repeat_extend_forward_end_calcBlockSize
MOVB (R8)(R10*1), R9
CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_calcBlockSize
@@ -17590,7 +17579,6 @@ matchlen_loop_match_nolit_calcBlockSize:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_calcBlockSize
- JZ match_nolit_end_calcBlockSize
matchlen_match4_match_nolit_calcBlockSize:
CMPL SI, $0x04
@@ -17598,21 +17586,21 @@ matchlen_match4_match_nolit_calcBlockSize:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_calcBlockSize
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_calcBlockSize:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_calcBlockSize
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_calcBlockSize
+ JB match_nolit_end_calcBlockSize
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_calcBlockSize
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_calcBlockSize
matchlen_match1_match_nolit_calcBlockSize:
- CMPL SI, $0x01
- JB match_nolit_end_calcBlockSize
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_calcBlockSize
@@ -17909,7 +17897,6 @@ matchlen_loop_repeat_extend_calcBlockSizeSmall:
LEAL 8(R10), R10
CMPL DI, $0x08
JAE matchlen_loopback_repeat_extend_calcBlockSizeSmall
- JZ repeat_extend_forward_end_calcBlockSizeSmall
matchlen_match4_repeat_extend_calcBlockSizeSmall:
CMPL DI, $0x04
@@ -17917,21 +17904,21 @@ matchlen_match4_repeat_extend_calcBlockSizeSmall:
MOVL (R8)(R10*1), R9
CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_calcBlockSizeSmall
- SUBL $0x04, DI
+ LEAL -4(DI), DI
LEAL 4(R10), R10
matchlen_match2_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x02
- JB matchlen_match1_repeat_extend_calcBlockSizeSmall
+ CMPL DI, $0x01
+ JE matchlen_match1_repeat_extend_calcBlockSizeSmall
+ JB repeat_extend_forward_end_calcBlockSizeSmall
MOVW (R8)(R10*1), R9
CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_calcBlockSizeSmall
- SUBL $0x02, DI
LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ repeat_extend_forward_end_calcBlockSizeSmall
matchlen_match1_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x01
- JB repeat_extend_forward_end_calcBlockSizeSmall
MOVB (R8)(R10*1), R9
CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_calcBlockSizeSmall
@@ -18091,7 +18078,6 @@ matchlen_loop_match_nolit_calcBlockSizeSmall:
LEAL 8(R9), R9
CMPL SI, $0x08
JAE matchlen_loopback_match_nolit_calcBlockSizeSmall
- JZ match_nolit_end_calcBlockSizeSmall
matchlen_match4_match_nolit_calcBlockSizeSmall:
CMPL SI, $0x04
@@ -18099,21 +18085,21 @@ matchlen_match4_match_nolit_calcBlockSizeSmall:
MOVL (DI)(R9*1), R8
CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_calcBlockSizeSmall
- SUBL $0x04, SI
+ LEAL -4(SI), SI
LEAL 4(R9), R9
matchlen_match2_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x02
- JB matchlen_match1_match_nolit_calcBlockSizeSmall
+ CMPL SI, $0x01
+ JE matchlen_match1_match_nolit_calcBlockSizeSmall
+ JB match_nolit_end_calcBlockSizeSmall
MOVW (DI)(R9*1), R8
CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_calcBlockSizeSmall
- SUBL $0x02, SI
LEAL 2(R9), R9
+ SUBL $0x02, SI
+ JZ match_nolit_end_calcBlockSizeSmall
matchlen_match1_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x01
- JB match_nolit_end_calcBlockSizeSmall
MOVB (DI)(R9*1), R8
CMPB (BX)(R9*1), R8
JNE match_nolit_end_calcBlockSizeSmall
@@ -18879,7 +18865,6 @@ matchlen_loop_standalone:
LEAL 8(SI), SI
CMPL DX, $0x08
JAE matchlen_loopback_standalone
- JZ gen_match_len_end
matchlen_match4_standalone:
CMPL DX, $0x04
@@ -18887,21 +18872,21 @@ matchlen_match4_standalone:
MOVL (AX)(SI*1), BX
CMPL (CX)(SI*1), BX
JNE matchlen_match2_standalone
- SUBL $0x04, DX
+ LEAL -4(DX), DX
LEAL 4(SI), SI
matchlen_match2_standalone:
- CMPL DX, $0x02
- JB matchlen_match1_standalone
+ CMPL DX, $0x01
+ JE matchlen_match1_standalone
+ JB gen_match_len_end
MOVW (AX)(SI*1), BX
CMPW (CX)(SI*1), BX
JNE matchlen_match1_standalone
- SUBL $0x02, DX
LEAL 2(SI), SI
+ SUBL $0x02, DX
+ JZ gen_match_len_end
matchlen_match1_standalone:
- CMPL DX, $0x01
- JB gen_match_len_end
MOVB (AX)(SI*1), BL
CMPB (CX)(SI*1), BL
JNE gen_match_len_end
diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go
index 5a944068..089cd36d 100644
--- a/vendor/github.com/klauspost/compress/s2/writer.go
+++ b/vendor/github.com/klauspost/compress/s2/writer.go
@@ -771,7 +771,7 @@ func (w *Writer) closeIndex(idx bool) ([]byte, error) {
}
var index []byte
- if w.err(nil) == nil && w.writer != nil {
+ if w.err(err) == nil && w.writer != nil {
// Create index.
if idx {
compSize := int64(-1)
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
index 37b5167d..accd7aba 100644
--- a/vendor/github.com/klauspost/cpuid/v2/README.md
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -435,6 +435,7 @@ Exit Code 1
| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. |
| SYSEE | SYSENTER and SYSEXIT instructions |
| TBM | AMD Trailing Bit Manipulation |
+| TDX_GUEST | Intel Trust Domain Extensions Guest |
| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations |
| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. |
| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. |
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
index 89a861d4..d015c744 100644
--- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -226,6 +226,7 @@ const (
SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
SYSEE // SYSENTER and SYSEXIT instructions
TBM // AMD Trailing Bit Manipulation
+ TDX_GUEST // Intel Trust Domain Extensions Guest
TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations
TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
@@ -1186,13 +1187,8 @@ func support() flagSet {
fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP)
fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD)
- // CPUID.(EAX=7, ECX=1).EDX
- fs.setIf(edx&(1<<4) != 0, AVXVNNIINT8)
- fs.setIf(edx&(1<<5) != 0, AVXNECONVERT)
- fs.setIf(edx&(1<<14) != 0, PREFETCHI)
-
// CPUID.(EAX=7, ECX=1).EAX
- eax1, _, _, _ := cpuidex(7, 1)
+ eax1, _, _, edx1 := cpuidex(7, 1)
fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
@@ -1202,6 +1198,11 @@ func support() flagSet {
fs.setIf(eax1&(1<<23) != 0, AVXIFMA)
fs.setIf(eax1&(1<<26) != 0, LAM)
+ // CPUID.(EAX=7, ECX=1).EDX
+ fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
+ fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
+ fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
+
// Only detect AVX-512 features if XGETBV is supported
if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
// Check for OS support
@@ -1393,6 +1394,13 @@ func support() flagSet {
fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
}
+ if mfi >= 0x21 {
+ // Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21).
+ _, ebx, ecx, edx := cpuid(0x21)
+ identity := string(valAsString(ebx, edx, ecx))
+ fs.setIf(identity == "IntelTDX ", TDX_GUEST)
+ }
+
return fs
}
diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
index 2a27f44d..024c706a 100644
--- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
+++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
@@ -166,59 +166,60 @@ func _() {
_ = x[SYSCALL-156]
_ = x[SYSEE-157]
_ = x[TBM-158]
- _ = x[TLB_FLUSH_NESTED-159]
- _ = x[TME-160]
- _ = x[TOPEXT-161]
- _ = x[TSCRATEMSR-162]
- _ = x[TSXLDTRK-163]
- _ = x[VAES-164]
- _ = x[VMCBCLEAN-165]
- _ = x[VMPL-166]
- _ = x[VMSA_REGPROT-167]
- _ = x[VMX-168]
- _ = x[VPCLMULQDQ-169]
- _ = x[VTE-170]
- _ = x[WAITPKG-171]
- _ = x[WBNOINVD-172]
- _ = x[WRMSRNS-173]
- _ = x[X87-174]
- _ = x[XGETBV1-175]
- _ = x[XOP-176]
- _ = x[XSAVE-177]
- _ = x[XSAVEC-178]
- _ = x[XSAVEOPT-179]
- _ = x[XSAVES-180]
- _ = x[AESARM-181]
- _ = x[ARMCPUID-182]
- _ = x[ASIMD-183]
- _ = x[ASIMDDP-184]
- _ = x[ASIMDHP-185]
- _ = x[ASIMDRDM-186]
- _ = x[ATOMICS-187]
- _ = x[CRC32-188]
- _ = x[DCPOP-189]
- _ = x[EVTSTRM-190]
- _ = x[FCMA-191]
- _ = x[FP-192]
- _ = x[FPHP-193]
- _ = x[GPA-194]
- _ = x[JSCVT-195]
- _ = x[LRCPC-196]
- _ = x[PMULL-197]
- _ = x[SHA1-198]
- _ = x[SHA2-199]
- _ = x[SHA3-200]
- _ = x[SHA512-201]
- _ = x[SM3-202]
- _ = x[SM4-203]
- _ = x[SVE-204]
- _ = x[lastID-205]
+ _ = x[TDX_GUEST-159]
+ _ = x[TLB_FLUSH_NESTED-160]
+ _ = x[TME-161]
+ _ = x[TOPEXT-162]
+ _ = x[TSCRATEMSR-163]
+ _ = x[TSXLDTRK-164]
+ _ = x[VAES-165]
+ _ = x[VMCBCLEAN-166]
+ _ = x[VMPL-167]
+ _ = x[VMSA_REGPROT-168]
+ _ = x[VMX-169]
+ _ = x[VPCLMULQDQ-170]
+ _ = x[VTE-171]
+ _ = x[WAITPKG-172]
+ _ = x[WBNOINVD-173]
+ _ = x[WRMSRNS-174]
+ _ = x[X87-175]
+ _ = x[XGETBV1-176]
+ _ = x[XOP-177]
+ _ = x[XSAVE-178]
+ _ = x[XSAVEC-179]
+ _ = x[XSAVEOPT-180]
+ _ = x[XSAVES-181]
+ _ = x[AESARM-182]
+ _ = x[ARMCPUID-183]
+ _ = x[ASIMD-184]
+ _ = x[ASIMDDP-185]
+ _ = x[ASIMDHP-186]
+ _ = x[ASIMDRDM-187]
+ _ = x[ATOMICS-188]
+ _ = x[CRC32-189]
+ _ = x[DCPOP-190]
+ _ = x[EVTSTRM-191]
+ _ = x[FCMA-192]
+ _ = x[FP-193]
+ _ = x[FPHP-194]
+ _ = x[GPA-195]
+ _ = x[JSCVT-196]
+ _ = x[LRCPC-197]
+ _ = x[PMULL-198]
+ _ = x[SHA1-199]
+ _ = x[SHA2-200]
+ _ = x[SHA3-201]
+ _ = x[SHA512-202]
+ _ = x[SM3-203]
+ _ = x[SM4-204]
+ _ = x[SVE-205]
+ _ = x[lastID-206]
_ = x[firstID-0]
}
-const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
+const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
-var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1198, 1201, 1207, 1217, 1225, 1229, 1238, 1242, 1254, 1257, 1267, 1270, 1277, 1285, 1292, 1295, 1302, 1305, 1310, 1316, 1324, 1330, 1336, 1344, 1349, 1356, 1363, 1371, 1378, 1383, 1388, 1395, 1399, 1401, 1405, 1408, 1413, 1418, 1423, 1427, 1431, 1435, 1441, 1444, 1447, 1450, 1456}
+var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1191, 1207, 1210, 1216, 1226, 1234, 1238, 1247, 1251, 1263, 1266, 1276, 1279, 1286, 1294, 1301, 1304, 1311, 1314, 1319, 1325, 1333, 1339, 1345, 1353, 1358, 1365, 1372, 1380, 1387, 1392, 1397, 1404, 1408, 1410, 1414, 1417, 1422, 1427, 1432, 1436, 1440, 1444, 1450, 1453, 1456, 1459, 1465}
func (i FeatureID) String() string {
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
index 9016ec4b..0ae9142e 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
@@ -62,7 +62,7 @@ type PutObjectFanOutResponse struct {
ETag string `json:"etag,omitempty"`
VersionID string `json:"versionId,omitempty"`
LastModified *time.Time `json:"lastModified,omitempty"`
- Error error `json:"error,omitempty"`
+ Error string `json:"error,omitempty"`
}
// PutObjectFanOut - is a variant of PutObject instead of writing a single object from a single
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
index 85d6c70a..5f117afa 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -389,8 +389,9 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
headers := opts.Header()
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
- headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
- headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
+ headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
+ headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
+ headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload
}
// Instantiate all the complete multipart buffer.
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 0546b1ac..712fe2fd 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -124,7 +124,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.55"
+ libraryVersion = "v7.0.57"
)
// User Agent should always following the below style.
@@ -919,7 +919,7 @@ func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, is
if h, p, err := net.SplitHostPort(host); err == nil {
if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
host = h
- if ip := net.ParseIP(h); ip != nil && ip.To16() != nil {
+ if ip := net.ParseIP(h); ip != nil && ip.To4() == nil {
host = "[" + h + "]"
}
}
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index ed1b7340..2bc6b864 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -2539,13 +2539,13 @@ func testTrailingChecksums() {
test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher)
// Set correct CRC.
- c.TraceOn(os.Stdout)
+ // c.TraceOn(os.Stderr)
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO)
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
- c.TraceOff()
+ // c.TraceOff()
cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
@@ -2655,8 +2655,8 @@ func testPutObjectWithAutomaticChecksums() {
}
// Enable tracing, write to stderr.
- c.TraceOn(os.Stderr)
- defer c.TraceOff()
+ // c.TraceOn(os.Stderr)
+ // defer c.TraceOff()
for i, test := range tests {
bufSize := dataFileMap["datafile-10-kB"]
@@ -4821,6 +4821,11 @@ func testPresignedPostPolicy() {
policy.SetContentType("binary/octet-stream")
policy.SetContentLengthRange(10, 1024*1024)
policy.SetUserMetadata(metadataKey, metadataValue)
+
+ // Add CRC32C
+ checksum := minio.ChecksumCRC32C.ChecksumBytes(buf)
+ policy.SetChecksum(checksum)
+
args["policy"] = policy.String()
presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
@@ -4888,6 +4893,7 @@ func testPresignedPostPolicy() {
Timeout: 30 * time.Second,
Transport: transport,
}
+ args["url"] = presignedPostPolicyURL.String()
req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes()))
if err != nil {
@@ -4920,13 +4926,21 @@ func testPresignedPostPolicy() {
expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
- if val, ok := res.Header["Location"]; ok {
- if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
- logError(testName, function, args, startTime, "", "Location in header response is incorrect", err)
+ if !strings.Contains(expectedLocation, "s3.amazonaws.com/") {
+ // Test when not against AWS S3.
+ if val, ok := res.Header["Location"]; ok {
+ if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err)
+ return
+ }
+ } else {
+ logError(testName, function, args, startTime, "", "Location not found in header response", err)
return
}
- } else {
- logError(testName, function, args, startTime, "", "Location not found in header response", err)
+ }
+ want := checksum.Encoded()
+ if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil)
return
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 12331542..deedc2df 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -18,12 +18,12 @@ import (
"sort"
"strings"
- "github.com/prometheus/client_golang/prometheus/internal"
-
"github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 5b69965b..8d818afe 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -401,7 +401,7 @@ type HistogramOpts struct {
// Histogram by a Prometheus server with that feature enabled (requires
// Prometheus v2.40+). Sparse buckets are exponential buckets covering
// the whole float64 range (with the exception of the “zero” bucket, see
- // SparseBucketsZeroThreshold below). From any one bucket to the next,
+ // NativeHistogramZeroThreshold below). From any one bucket to the next,
// the width of the bucket grows by a constant
// factor. NativeHistogramBucketFactor provides an upper bound for this
// factor (exception see below). The smaller
@@ -432,7 +432,7 @@ type HistogramOpts struct {
// bucket. For best results, this should be close to a bucket
// boundary. This is usually the case if picking a power of two. If
// NativeHistogramZeroThreshold is left at zero,
- // DefSparseBucketsZeroThreshold is used as the threshold. To configure
+ // DefNativeHistogramZeroThreshold is used as the threshold. To configure
// a zero bucket with an actual threshold of zero (i.e. only
// observations of precisely zero will go into the zero bucket), set
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
@@ -639,8 +639,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
if frac == 0.5 {
key--
}
- div := 1 << -schema
- key = (key + div - 1) / div
+ offset := (1 << -schema) - 1
+ key = (key + offset) >> -schema
}
if isInf {
key++
@@ -817,7 +817,7 @@ func (h *histogram) observe(v float64, bucket int) {
}
}
-// limitSparsebuckets applies a strategy to limit the number of populated sparse
+// limitBuckets applies a strategy to limit the number of populated sparse
// buckets. It's generally best effort, and there are situations where the
// number can go higher (if even the lowest resolution isn't enough to reduce
// the number sufficiently, or if the provided counts aren't fully updated yet
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index a4cc9810..09b8d2fb 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -37,6 +37,7 @@ import (
"fmt"
"io"
"net/http"
+ "strconv"
"strings"
"sync"
"time"
@@ -47,9 +48,10 @@ import (
)
const (
- contentTypeHeader = "Content-Type"
- contentEncodingHeader = "Content-Encoding"
- acceptEncodingHeader = "Accept-Encoding"
+ contentTypeHeader = "Content-Type"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+ processStartTimeHeader = "Process-Start-Time-Unix"
)
var gzipPool = sync.Pool{
@@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+ if !opts.ProcessStartTime.IsZero() {
+ rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
+ }
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
@@ -366,6 +371,14 @@ type HandlerOpts struct {
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
+ // ProcessStartTime allows setting a process start time value that will be exposed
+ // with "Process-Start-Time-Unix" response header along with the metrics
+ // payload. This allows callers to have efficient transformations to cumulative
+ // counters (e.g. OpenTelemetry) or generally _created timestamp estimation per
+ // scrape target.
+ // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
+ // exposition format.
+ ProcessStartTime time.Time
}
// gzipAccepted returns whether the client will accept gzip-encoded content.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 386fb2d2..f0d0015a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -20,6 +20,24 @@ import (
"github.com/prometheus/common/model"
)
+var labelsPool = &sync.Pool{
+ New: func() interface{} {
+ return make(Labels)
+ },
+}
+
+func getLabelsFromPool() Labels {
+ return labelsPool.Get().(Labels)
+}
+
+func putLabelsToPool(labels Labels) {
+ for k := range labels {
+ delete(labels, k)
+ }
+
+ labelsPool.Put(labels)
+}
+
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
@@ -93,6 +111,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
labels = constrainLabels(m.desc, labels)
+ defer putLabelsToPool(labels)
+
h, err := m.hashLabels(labels)
if err != nil {
return false
@@ -109,6 +129,8 @@ func (m *MetricVec) Delete(labels Labels) bool {
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels = constrainLabels(m.desc, labels)
+ defer putLabelsToPool(labels)
+
return m.metricMap.deleteByLabels(labels, m.curry)
}
@@ -229,6 +251,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
labels = constrainLabels(m.desc, labels)
+ defer putLabelsToPool(labels)
+
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@@ -647,15 +671,16 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
}
func constrainLabels(desc *Desc, labels Labels) Labels {
- constrainedValues := make(Labels, len(labels))
+ constrainedLabels := getLabelsFromPool()
for l, v := range labels {
if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
- constrainedValues[l] = desc.variableLabels[i].Constrain(v)
- continue
+ v = desc.variableLabels[i].Constrain(v)
}
- constrainedValues[l] = v
+
+ constrainedLabels[l] = v
}
- return constrainedValues
+
+ return constrainedLabels
}
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go
index 8c35b172..9bed2419 100644
--- a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go
+++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go
@@ -11,6 +11,9 @@ func Sleep(ctx context.Context, interval time.Duration) error {
timer := time.NewTimer(interval)
select {
case <-ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
+ }
return ctx.Err()
case <-timer.C:
return nil
diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go
index 56fb1cc6..059518dc 100644
--- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go
+++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go
@@ -154,13 +154,13 @@ func fillFromMeminfoWithContext() (*VirtualMemoryStat, *VirtualMemoryExStat, err
return ret, retEx, err
}
retEx.Unevictable = t * 1024
- case "WriteBack":
+ case "Writeback":
t, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return ret, retEx, err
}
ret.WriteBack = t * 1024
- case "WriteBackTmp":
+ case "WritebackTmp":
t, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return ret, retEx, err
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go
index 1c8d4f4e..4d2cfbcd 100644
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go
+++ b/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go
@@ -278,7 +278,7 @@ func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackSta
return nil, common.ErrNotImplementedError
}
-// NetProtoCounters returns network statistics for the entire system
+// ProtoCounters returns network statistics for the entire system
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Not Implemented for Darwin
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go
index 7f31851e..bf8baf09 100644
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go
+++ b/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go
@@ -115,7 +115,7 @@ func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackSta
return nil, common.ErrNotImplementedError
}
-// NetProtoCounters returns network statistics for the entire system
+// ProtoCounters returns network statistics for the entire system
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Not Implemented for FreeBSD
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go
index c7cd0db1..dd62d479 100644
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go
+++ b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go
@@ -157,7 +157,7 @@ var netProtocols = []string{
"udplite",
}
-// NetProtoCounters returns network statistics for the entire system
+// ProtoCounters returns network statistics for the entire system
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Available protocols:
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go
index 5f066a09..cf48f53e 100644
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go
+++ b/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go
@@ -164,7 +164,7 @@ func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackSta
return nil, common.ErrNotImplementedError
}
-// NetProtoCounters returns network statistics for the entire system
+// ProtoCounters returns network statistics for the entire system
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Not Implemented for OpenBSD
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go b/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go
index 68b26bdc..5d384342 100644
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go
+++ b/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go
@@ -338,7 +338,7 @@ func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackSta
return nil, common.ErrNotImplementedError
}
-// NetProtoCounters returns network statistics for the entire system
+// ProtoCounters returns network statistics for the entire system
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Not Implemented for Windows
diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process.go b/vendor/github.com/shirou/gopsutil/v3/process/process.go
index 0ca26c21..1a7fe1b8 100644
--- a/vendor/github.com/shirou/gopsutil/v3/process/process.go
+++ b/vendor/github.com/shirou/gopsutil/v3/process/process.go
@@ -335,7 +335,7 @@ func (p *Process) MemoryPercentWithContext(ctx context.Context) (float32, error)
return (100 * float32(used) / float32(total)), nil
}
-// CPU_Percent returns how many percent of the CPU time this process uses
+// CPUPercent returns how many percent of the CPU time this process uses
func (p *Process) CPUPercent() (float64, error) {
return p.CPUPercentWithContext(context.Background())
}
@@ -507,7 +507,7 @@ func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) {
return p.MemoryInfoExWithContext(context.Background())
}
-// PageFaultsInfo returns the process's page fault counters.
+// PageFaults returns the process's page fault counters.
func (p *Process) PageFaults() (*PageFaultsStat, error) {
return p.PageFaultsWithContext(context.Background())
}
@@ -530,7 +530,7 @@ func (p *Process) Connections() ([]net.ConnectionStat, error) {
return p.ConnectionsWithContext(context.Background())
}
-// Connections returns a slice of net.ConnectionStat used by the process at most `max`.
+// ConnectionsMax returns a slice of net.ConnectionStat used by the process at most `max`.
func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) {
return p.ConnectionsMaxWithContext(context.Background(), max)
}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
index 72e8e3a1..074fd4b8 100644
--- a/vendor/github.com/sirupsen/logrus/writer.go
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -4,6 +4,7 @@ import (
"bufio"
"io"
"runtime"
+ "strings"
)
// Writer at INFO level. See WriterLevel for details.
@@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
+// Writer returns an io.Writer that writes to the logger at the info log level
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
+// WriterLevel returns an io.Writer that writes to the logger at the given log level
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
+ // Determine which log function to use based on the specified log level
switch level {
case TraceLevel:
printFunc = entry.Trace
@@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
printFunc = entry.Print
}
+ // Start a new goroutine to scan the input and write it to the logger using the specified print function.
+ // It splits the input into chunks of up to 64KB to avoid buffer overflows.
go entry.writerScanner(reader, printFunc)
+
+ // Set a finalizer function to close the writer when it is garbage collected
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
+// writerScanner scans the input from the reader and writes it to the logger
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
- for scanner.Scan() {
- printFunc(scanner.Text())
+
+ // Set the buffer size to the maximum token size to avoid buffer overflows
+ scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
+
+ // Define a split function to split the input into chunks of up to 64KB
+ chunkSize := bufio.MaxScanTokenSize // 64KB
+ splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
+ if len(data) >= chunkSize {
+ return chunkSize, data[:chunkSize], nil
+ }
+
+ return bufio.ScanLines(data, atEOF)
}
+
+ // Use the custom split function to split the input
+ scanner.Split(splitFunc)
+
+ // Scan the input and write it to the logger using the specified print function
+ for scanner.Scan() {
+ printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
+ }
+
+ // If there was an error while scanning the input, log an error
if err := scanner.Err(); err != nil {
entry.Errorf("Error while reading from Writer: %s", err)
}
+
+ // Close the reader when we are done
reader.Close()
}
+// writerFinalizer is a finalizer function that closes the given writer when it is garbage collected
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml
index 53c0110b..69c6ced5 100644
--- a/vendor/github.com/tklauser/numcpus/.cirrus.yml
+++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml
@@ -1,6 +1,6 @@
env:
CIRRUS_CLONE_DEPTH: 1
- GO_VERSION: go1.19.1
+ GO_VERSION: go1.20
freebsd_12_task:
freebsd_instance:
diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go
index 4b6675a2..19b76c9b 100644
--- a/vendor/github.com/urfave/cli/v2/app.go
+++ b/vendor/github.com/urfave/cli/v2/app.go
@@ -332,11 +332,18 @@ func (a *App) RunContext(ctx context.Context, arguments []string) (err error) {
return a.rootCommand.Run(cCtx, arguments...)
}
-// This is a stub function to keep public API unchanged from old code
-//
-// Deprecated: use App.Run or App.RunContext
+// RunAsSubcommand is for legacy/compatibility purposes only. New code should only
+// use App.RunContext. This function is slated to be removed in v3.
func (a *App) RunAsSubcommand(ctx *Context) (err error) {
- return a.RunContext(ctx.Context, ctx.Args().Slice())
+ a.Setup()
+
+ cCtx := NewContext(a, nil, ctx)
+ cCtx.shellComplete = ctx.shellComplete
+
+ a.rootCommand = a.newRootCommand()
+ cCtx.Command = a.rootCommand
+
+ return a.rootCommand.Run(cCtx, ctx.Args().Slice()...)
}
func (a *App) suggestFlagFromError(err error, command string) (string, error) {
diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go
index f978b4a4..69a0fdf6 100644
--- a/vendor/github.com/urfave/cli/v2/command.go
+++ b/vendor/github.com/urfave/cli/v2/command.go
@@ -135,6 +135,7 @@ func (c *Command) setup(ctx *Context) {
if scmd.HelpName == "" {
scmd.HelpName = fmt.Sprintf("%s %s", c.HelpName, scmd.Name)
}
+ scmd.separator = c.separator
newCmds = append(newCmds, scmd)
}
c.Subcommands = newCmds
diff --git a/vendor/github.com/urfave/cli/v2/context.go b/vendor/github.com/urfave/cli/v2/context.go
index dbf50e49..a45c120b 100644
--- a/vendor/github.com/urfave/cli/v2/context.go
+++ b/vendor/github.com/urfave/cli/v2/context.go
@@ -56,6 +56,7 @@ func (cCtx *Context) Set(name, value string) error {
// IsSet determines if the flag was actually set
func (cCtx *Context) IsSet(name string) bool {
+
if fs := cCtx.lookupFlagSet(name); fs != nil {
isSet := false
fs.Visit(func(f *flag.Flag) {
@@ -72,7 +73,23 @@ func (cCtx *Context) IsSet(name string) bool {
return false
}
- return f.IsSet()
+ if f.IsSet() {
+ return true
+ }
+
+ // now redo flagset search on aliases
+ aliases := f.Names()
+ fs.Visit(func(f *flag.Flag) {
+ for _, alias := range aliases {
+ if f.Name == alias {
+ isSet = true
+ }
+ }
+ })
+
+ if isSet {
+ return true
+ }
}
return false
@@ -204,9 +221,10 @@ func (cCtx *Context) checkRequiredFlags(flags []Flag) requiredFlagsErr {
var flagPresent bool
var flagName string
- for _, key := range f.Names() {
- flagName = key
+ flagNames := f.Names()
+ flagName = flagNames[0]
+ for _, key := range flagNames {
if cCtx.IsSet(strings.TrimSpace(key)) {
flagPresent = true
}
diff --git a/vendor/github.com/urfave/cli/v2/docs.go b/vendor/github.com/urfave/cli/v2/docs.go
index 8b1c9c8a..6cd0624a 100644
--- a/vendor/github.com/urfave/cli/v2/docs.go
+++ b/vendor/github.com/urfave/cli/v2/docs.go
@@ -153,9 +153,14 @@ func prepareFlags(
// flagDetails returns a string containing the flags metadata
func flagDetails(flag DocGenerationFlag) string {
description := flag.GetUsage()
- value := flag.GetValue()
- if value != "" {
- description += " (default: " + value + ")"
+ if flag.TakesValue() {
+ defaultText := flag.GetDefaultText()
+ if defaultText == "" {
+ defaultText = flag.GetValue()
+ }
+ if defaultText != "" {
+ description += " (default: " + defaultText + ")"
+ }
}
return ": " + description
}
diff --git a/vendor/github.com/urfave/cli/v2/flag_bool.go b/vendor/github.com/urfave/cli/v2/flag_bool.go
index f64d1cd9..369d18b7 100644
--- a/vendor/github.com/urfave/cli/v2/flag_bool.go
+++ b/vendor/github.com/urfave/cli/v2/flag_bool.go
@@ -52,7 +52,7 @@ func (b *boolValue) String() string {
func (b *boolValue) IsBoolFlag() bool { return true }
func (b *boolValue) Count() int {
- if b.count != nil {
+ if b.count != nil && *b.count > 0 {
return *b.count
}
return 0
@@ -130,6 +130,11 @@ func (f *BoolFlag) Apply(set *flag.FlagSet) error {
if count == nil {
count = new(int)
}
+
+ // since count will be incremented for each alias as well
+ // subtract number of aliases from overall count
+ *count -= len(f.Aliases)
+
if dest == nil {
dest = new(bool)
}
diff --git a/vendor/github.com/urfave/cli/v2/flag_generic.go b/vendor/github.com/urfave/cli/v2/flag_generic.go
index 4f9ac0a7..039ffdfe 100644
--- a/vendor/github.com/urfave/cli/v2/flag_generic.go
+++ b/vendor/github.com/urfave/cli/v2/flag_generic.go
@@ -117,13 +117,8 @@ func (cCtx *Context) Generic(name string) interface{} {
}
func lookupGeneric(name string, set *flag.FlagSet) interface{} {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := f.Value, error(nil)
- if err != nil {
- return nil
- }
- return parsed
+ if f := set.Lookup(name); f != nil {
+ return f.Value
}
return nil
}
diff --git a/vendor/github.com/urfave/cli/v2/flag_path.go b/vendor/github.com/urfave/cli/v2/flag_path.go
index 6434d322..c4986779 100644
--- a/vendor/github.com/urfave/cli/v2/flag_path.go
+++ b/vendor/github.com/urfave/cli/v2/flag_path.go
@@ -90,13 +90,8 @@ func (cCtx *Context) Path(name string) string {
}
func lookupPath(name string, set *flag.FlagSet) string {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := f.Value.String(), error(nil)
- if err != nil {
- return ""
- }
- return parsed
+ if f := set.Lookup(name); f != nil {
+ return f.Value.String()
}
return ""
}
diff --git a/vendor/github.com/urfave/cli/v2/flag_string.go b/vendor/github.com/urfave/cli/v2/flag_string.go
index 3050086e..4e55a2ca 100644
--- a/vendor/github.com/urfave/cli/v2/flag_string.go
+++ b/vendor/github.com/urfave/cli/v2/flag_string.go
@@ -87,10 +87,8 @@ func (cCtx *Context) String(name string) string {
}
func lookupString(name string, set *flag.FlagSet) string {
- f := set.Lookup(name)
- if f != nil {
- parsed := f.Value.String()
- return parsed
+ if f := set.Lookup(name); f != nil {
+ return f.Value.String()
}
return ""
}
diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt
index bd5e1def..6016bd82 100644
--- a/vendor/github.com/urfave/cli/v2/godoc-current.txt
+++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt
@@ -141,7 +141,7 @@ USAGE:
DESCRIPTION:
{{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
-COMMANDS:{{template "visibleCommandTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
+COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}}
@@ -357,9 +357,8 @@ func (a *App) RunAndExitOnError()
code in the cli.ExitCoder
func (a *App) RunAsSubcommand(ctx *Context) (err error)
- This is a stub function to keep public API unchanged from old code
-
- Deprecated: use App.Run or App.RunContext
+ RunAsSubcommand is for legacy/compatibility purposes only. New code should
+ only use App.RunContext. This function is slated to be removed in v3.
func (a *App) RunContext(ctx context.Context, arguments []string) (err error)
RunContext is like Run except it takes a Context that will be passed to
diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go
index c7b8f55a..a71fceb7 100644
--- a/vendor/github.com/urfave/cli/v2/help.go
+++ b/vendor/github.com/urfave/cli/v2/help.go
@@ -42,6 +42,7 @@ var helpCommand = &Command{
// 3 $ app foo
// 4 $ app help foo
// 5 $ app foo help
+ // 6 $ app foo -h (with no other sub-commands nor flags defined)
// Case 4. when executing a help command set the context to parent
// to allow resolution of subsequent args. This will transform
@@ -77,6 +78,8 @@ var helpCommand = &Command{
HelpPrinter(cCtx.App.Writer, templ, cCtx.Command)
return nil
}
+
+ // Case 6, handling incorporated in the callee itself
return ShowSubcommandHelp(cCtx)
},
}
@@ -292,8 +295,12 @@ func ShowSubcommandHelp(cCtx *Context) error {
if cCtx == nil {
return nil
}
-
- HelpPrinter(cCtx.App.Writer, SubcommandHelpTemplate, cCtx.Command)
+ // use custom template when provided (fixes #1703)
+ templ := SubcommandHelpTemplate
+ if cCtx.Command != nil && cCtx.Command.CustomHelpTemplate != "" {
+ templ = cCtx.Command.CustomHelpTemplate
+ }
+ HelpPrinter(cCtx.App.Writer, templ, cCtx.Command)
return nil
}
diff --git a/vendor/github.com/urfave/cli/v2/sliceflag.go b/vendor/github.com/urfave/cli/v2/sliceflag.go
index 7dea3576..b2ca5904 100644
--- a/vendor/github.com/urfave/cli/v2/sliceflag.go
+++ b/vendor/github.com/urfave/cli/v2/sliceflag.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
package cli
import (
diff --git a/vendor/github.com/urfave/cli/v2/sliceflag_pre18.go b/vendor/github.com/urfave/cli/v2/sliceflag_pre18.go
deleted file mode 100644
index 1173ae74..00000000
--- a/vendor/github.com/urfave/cli/v2/sliceflag_pre18.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build !go1.18
-// +build !go1.18
-
-package cli
-
-import (
- "flag"
-)
-
-func unwrapFlagValue(v flag.Value) flag.Value { return v }
diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go
index b565ba61..da98890e 100644
--- a/vendor/github.com/urfave/cli/v2/template.go
+++ b/vendor/github.com/urfave/cli/v2/template.go
@@ -88,7 +88,7 @@ USAGE:
DESCRIPTION:
{{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
-COMMANDS:{{template "visibleCommandTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
+COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}}
diff --git a/vendor/github.com/vektah/gqlparser/v2/.go-version b/vendor/github.com/vektah/gqlparser/v2/.go-version
new file mode 100644
index 00000000..66e2ae6c
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/.go-version
@@ -0,0 +1 @@
+1.19.1
diff --git a/vendor/github.com/vektah/gqlparser/v2/.golangci.yaml b/vendor/github.com/vektah/gqlparser/v2/.golangci.yaml
new file mode 100644
index 00000000..accb83fa
--- /dev/null
+++ b/vendor/github.com/vektah/gqlparser/v2/.golangci.yaml
@@ -0,0 +1,45 @@
+run:
+ timeout: 10m
+linters:
+ disable-all: true
+ enable:
+ - ineffassign
+ - typecheck
+ - varcheck
+ - unused
+ - structcheck
+ - deadcode
+ - gosimple
+ - goimports
+ - errcheck
+ - staticcheck
+ - stylecheck
+ - gosec
+ - asciicheck
+ - bodyclose
+ - exportloopref
+ - rowserrcheck
+ - makezero
+ - durationcheck
+ - prealloc
+ - predeclared
+
+linters-settings:
+ staticcheck:
+ checks: ["S1002","S1004","S1007","S1009","S1010","S1012","S1019","S1020","S1021","S1024","S1030","SA2*","SA3*","SA4009","SA5*","SA6000","SA6001","SA6005", "-SA2002"]
+ stylecheck:
+ checks: ["-ST1003"]
+ gosec:
+ severity: "low"
+ confidence: "low"
+ excludes:
+ - G101
+ - G112
+issues:
+ exclude-rules:
+ - path: _test\.go
+ linters:
+ - errcheck
+ - gosec
+ - rowserrcheck
+ - makezero
diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/path.go b/vendor/github.com/vektah/gqlparser/v2/ast/path.go
index 9af16843..be1a9e4e 100644
--- a/vendor/github.com/vektah/gqlparser/v2/ast/path.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/path.go
@@ -60,8 +60,8 @@ func (path *Path) UnmarshalJSON(b []byte) error {
type PathIndex int
-func (_ PathIndex) isPathElement() {}
+func (PathIndex) isPathElement() {}
type PathName string
-func (_ PathName) isPathElement() {}
+func (PathName) isPathElement() {}
diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/selection.go b/vendor/github.com/vektah/gqlparser/v2/ast/selection.go
index 159db844..5ef26c6a 100644
--- a/vendor/github.com/vektah/gqlparser/v2/ast/selection.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/selection.go
@@ -34,6 +34,6 @@ type Argument struct {
Position *Position `dump:"-"`
}
-func (f *Field) ArgumentMap(vars map[string]interface{}) map[string]interface{} {
- return arg2map(f.Definition.Arguments, f.Arguments, vars)
+func (s *Field) ArgumentMap(vars map[string]interface{}) map[string]interface{} {
+ return arg2map(s.Definition.Arguments, s.Arguments, vars)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/type.go b/vendor/github.com/vektah/gqlparser/v2/ast/type.go
index 9577fdb4..5f77bc7c 100644
--- a/vendor/github.com/vektah/gqlparser/v2/ast/type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/type.go
@@ -63,6 +63,6 @@ func (t *Type) IsCompatible(other *Type) bool {
return true
}
-func (v *Type) Dump() string {
- return v.String()
+func (t *Type) Dump() string {
+ return t.String()
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go
index 8145061a..79e9e0d1 100644
--- a/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go
+++ b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go
@@ -9,7 +9,7 @@ import (
"github.com/vektah/gqlparser/v2/ast"
)
-// Error is the standard graphql error type described in https://facebook.github.io/graphql/draft/#sec-Errors
+// Error is the standard graphql error type described in https://spec.graphql.org/draft/#sec-Errors
type Error struct {
err error `json:"-"`
Message string `json:"message"`
@@ -107,6 +107,13 @@ func WrapPath(path ast.Path, err error) *Error {
}
}
+func Wrap(err error) *Error {
+ return &Error{
+ err: err,
+ Message: err.Error(),
+ }
+}
+
func Errorf(message string, args ...interface{}) *Error {
return &Error{
Message: fmt.Sprintf(message, args...),
diff --git a/vendor/github.com/vektah/gqlparser/v2/gqlparser.go b/vendor/github.com/vektah/gqlparser/v2/gqlparser.go
index e242a896..3a6e2d13 100644
--- a/vendor/github.com/vektah/gqlparser/v2/gqlparser.go
+++ b/vendor/github.com/vektah/gqlparser/v2/gqlparser.go
@@ -5,11 +5,21 @@ import (
"github.com/vektah/gqlparser/v2/gqlerror"
"github.com/vektah/gqlparser/v2/parser"
"github.com/vektah/gqlparser/v2/validator"
+
+ // Blank import is used to load up the validator rules.
_ "github.com/vektah/gqlparser/v2/validator/rules"
)
func LoadSchema(str ...*ast.Source) (*ast.Schema, error) {
- return validator.LoadSchema(append([]*ast.Source{validator.Prelude}, str...)...)
+ ast, err := validator.LoadSchema(append([]*ast.Source{validator.Prelude}, str...)...)
+ gqlErr, ok := err.(*gqlerror.Error)
+ if ok {
+ return ast, gqlErr
+ }
+ if err != nil {
+ return ast, gqlerror.Wrap(err)
+ }
+ return ast, nil
}
func MustLoadSchema(str ...*ast.Source) *ast.Schema {
@@ -23,11 +33,14 @@ func MustLoadSchema(str ...*ast.Source) *ast.Schema {
func LoadQuery(schema *ast.Schema, str string) (*ast.QueryDocument, gqlerror.List) {
query, err := parser.ParseQuery(&ast.Source{Input: str})
if err != nil {
- gqlErr := err.(*gqlerror.Error)
- return nil, gqlerror.List{gqlErr}
+ gqlErr, ok := err.(*gqlerror.Error)
+ if ok {
+ return nil, gqlerror.List{gqlErr}
+ }
+ return nil, gqlerror.List{gqlerror.Wrap(err)}
}
errs := validator.Validate(schema, query)
- if errs != nil {
+ if len(errs) > 0 {
return nil, errs
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go
index 25dcdcb9..8ce04fd2 100644
--- a/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go
+++ b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go
@@ -121,7 +121,9 @@ func (s *Lexer) ReadToken() (token Token, err error) {
case '|':
return s.makeValueToken(Pipe, "")
case '#':
- s.readComment()
+ if comment, err := s.readComment(); err != nil {
+ return comment, err
+ }
return s.ReadToken()
case '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
@@ -254,9 +256,9 @@ func (s *Lexer) readNumber() (Token, error) {
if float {
return s.makeToken(Float)
- } else {
- return s.makeToken(Int)
}
+ return s.makeToken(Int)
+
}
// acceptByte if it matches any of given bytes, returning true if it found anything
@@ -393,8 +395,8 @@ func (s *Lexer) readString() (Token, error) {
case 't':
buf.WriteByte('\t')
default:
- s.end += 1
- s.endRunes += 1
+ s.end++
+ s.endRunes++
return s.makeError("Invalid character escape sequence: \\%s.", string(escape))
}
s.end += 2
diff --git a/vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml
index 5c4d5f0f..d85afb41 100644
--- a/vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml
+++ b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml
@@ -674,7 +674,6 @@ lex reports useful unknown character error:
- name: question mark
input: "?"
error:
- message: 'Cannot parse the unexpected character "?".'
message: 'Cannot parse the unexpected character "?".'
locations: [{ line: 1, column: 1 }]
diff --git a/vendor/github.com/vektah/gqlparser/v2/parser/query.go b/vendor/github.com/vektah/gqlparser/v2/parser/query.go
index a38d982a..123423bb 100644
--- a/vendor/github.com/vektah/gqlparser/v2/parser/query.go
+++ b/vendor/github.com/vektah/gqlparser/v2/parser/query.go
@@ -3,6 +3,7 @@ package parser
import (
"github.com/vektah/gqlparser/v2/lexer"
+ //nolint:revive
. "github.com/vektah/gqlparser/v2/ast"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/parser/schema.go b/vendor/github.com/vektah/gqlparser/v2/parser/schema.go
index aec08a97..9656bcca 100644
--- a/vendor/github.com/vektah/gqlparser/v2/parser/schema.go
+++ b/vendor/github.com/vektah/gqlparser/v2/parser/schema.go
@@ -1,6 +1,7 @@
package parser
import (
+ //nolint:revive
. "github.com/vektah/gqlparser/v2/ast"
"github.com/vektah/gqlparser/v2/lexer"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/prelude.go b/vendor/github.com/vektah/gqlparser/v2/validator/prelude.go
index c354ec0d..43766f22 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/prelude.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/prelude.go
@@ -2,6 +2,7 @@ package validator
import (
_ "embed"
+
"github.com/vektah/gqlparser/v2/ast"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/prelude.graphql b/vendor/github.com/vektah/gqlparser/v2/validator/prelude.graphql
index bdca0096..e199da5d 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/prelude.graphql
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/prelude.graphql
@@ -27,6 +27,9 @@ directive @deprecated(reason: String = "No longer supported") on FIELD_DEFINITIO
"The @specifiedBy built-in directive is used within the type system definition language to provide a scalar specification URL for specifying the behavior of custom scalar types."
directive @specifiedBy(url: String!) on SCALAR
+"The @defer directive may be specified on a fragment spread to imply de-prioritization, that causes the fragment to be omitted in the initial response, and delivered as a subsequent response afterward. A query with @defer directive will cause the request to potentially return multiple responses, where non-deferred data is delivered in the initial response and data deferred delivered in a subsequent response. @include and @skip take precedence over @defer."
+directive @defer(if: Boolean = true, label: String) on FRAGMENT_SPREAD | INLINE_FRAGMENT
+
type __Schema {
description: String
types: [__Type!]!
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go
index aa83c696..77c3fdac 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go
@@ -6,6 +6,8 @@ import (
"strings"
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
@@ -41,11 +43,12 @@ func getSuggestedTypeNames(walker *Walker, parent *ast.Definition, name string)
return nil
}
- var suggestedObjectTypes []string
+ possibleTypes := walker.Schema.GetPossibleTypes(parent)
+ var suggestedObjectTypes = make([]string, 0, len(possibleTypes))
var suggestedInterfaceTypes []string
interfaceUsageCount := map[string]int{}
- for _, possibleType := range walker.Schema.GetPossibleTypes(parent) {
+ for _, possibleType := range possibleTypes {
field := possibleType.Fields.ForName(name)
if field == nil {
continue
@@ -85,7 +88,7 @@ func getSuggestedFieldNames(parent *ast.Definition, name string) []string {
return nil
}
- var possibleFieldNames []string
+ var possibleFieldNames = make([]string, 0, len(parent.Fields))
for _, field := range parent.Fields {
possibleFieldNames = append(possibleFieldNames, field.Name)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go
index 5215f697..81ef861b 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go
@@ -4,6 +4,8 @@ import (
"fmt"
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go
index da5a7962..c187dabf 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go
index 18fe41fd..9b5f28db 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
@@ -12,7 +14,7 @@ func init() {
Line int
Column int
}
- var seen map[mayNotBeUsedDirective]bool = map[mayNotBeUsedDirective]bool{}
+ var seen = map[mayNotBeUsedDirective]bool{}
observers.OnDirective(func(walker *Walker, directive *ast.Directive) {
if directive.Definition == nil {
addError(
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go
index b7427d0d..3afd9c1c 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go
index 4c60c8d8..60bc0d52 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go
@@ -4,6 +4,8 @@ import (
"fmt"
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go
index 7abfbf62..902939d3 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go
index d2752854..fe8bb203 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go
index da73f349..a953174f 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go
@@ -5,6 +5,8 @@ import (
"strings"
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go
index 91df727a..46c18d12 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go
index dfc89672..218ffa77 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go
index df2e5f4b..d3088109 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go
index 38e1efa1..2ffc3dd1 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go
@@ -6,6 +6,8 @@ import (
"reflect"
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go
index a3f795c9..bed70289 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go
index d8ed6520..ab79163b 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go
index 718bc683..605ab9e8 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go
index a9e5bf63..7d4c6843 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go
@@ -5,6 +5,8 @@ import (
"strings"
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go
index 1d9a50ab..e977d638 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go
index 52dfb21e..47971ee1 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go
index 8c348aea..2c44a437 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go
index 092be671..c5fce8ff 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go
index 4d41b60a..49ffbe47 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go
index 6481ef4c..c93948c1 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go
index 22bea771..7ba6928b 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go
@@ -6,6 +6,8 @@ import (
"strconv"
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go
index 4ea94e5a..d16ee021 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go
index eef74354..e3fd6fbb 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go
@@ -2,6 +2,8 @@ package validator
import (
"github.com/vektah/gqlparser/v2/ast"
+
+ //nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/vektah/gqlparser/v2/validator"
)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
index 976ed830..21eb51a5 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
@@ -5,6 +5,7 @@ import (
"strconv"
"strings"
+ //nolint:revive
. "github.com/vektah/gqlparser/v2/ast"
"github.com/vektah/gqlparser/v2/gqlerror"
"github.com/vektah/gqlparser/v2/parser"
@@ -85,7 +86,7 @@ func ValidateSchemaDocument(ast *SchemaDocument) (*Schema, error) {
// scalars, it may (§3.13) define builtin directives. Here we check for
// that, and reject doubly-defined directives otherwise.
switch dir.Name {
- case "include", "skip", "deprecated", "specifiedBy": // the builtins
+ case "include", "skip", "deprecated", "specifiedBy", "defer": // the builtins
// In principle here we might want to validate that the
// directives are the same. But they might not be, if the
// server has an older spec than we do. (Plus, validating this
@@ -283,6 +284,9 @@ func validateDefinition(schema *Schema, def *Definition) *gqlerror.Error {
return gqlerror.ErrorPosf(def.Position, "%s %s: non-enum value %s.", def.Kind, def.Name, value.Name)
}
}
+ if err := validateDirectives(schema, value.Directives, LocationEnumValue, nil); err != nil {
+ return err
+ }
}
case InputObject:
if len(def.Fields) == 0 {
@@ -358,11 +362,12 @@ func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLo
if currentDirective != nil && dir.Name == currentDirective.Name {
return gqlerror.ErrorPosf(dir.Position, "Directive %s cannot refer to itself.", currentDirective.Name)
}
- if schema.Directives[dir.Name] == nil {
+ dirDefinition := schema.Directives[dir.Name]
+ if dirDefinition == nil {
return gqlerror.ErrorPosf(dir.Position, "Undefined directive %s.", dir.Name)
}
validKind := false
- for _, dirLocation := range schema.Directives[dir.Name].Locations {
+ for _, dirLocation := range dirDefinition.Locations {
if dirLocation == location {
validKind = true
break
@@ -371,6 +376,18 @@ func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLo
if !validKind {
return gqlerror.ErrorPosf(dir.Position, "Directive %s is not applicable on %s.", dir.Name, location)
}
+ for _, arg := range dir.Arguments {
+ if dirDefinition.Arguments.ForName(arg.Name) == nil {
+ return gqlerror.ErrorPosf(arg.Position, "Undefined argument %s for directive %s.", arg.Name, dir.Name)
+ }
+ }
+ for _, schemaArg := range dirDefinition.Arguments {
+ if schemaArg.Type.NonNull {
+ if arg := dir.Arguments.ForName(schemaArg.Name); arg == nil || arg.Value.Kind == NullValue {
+ return gqlerror.ErrorPosf(dir.Position, "Argument %s for directive %s cannot be null.", schemaArg.Name, dir.Name)
+ }
+ }
+ }
dir.Definition = schema.Directives[dir.Name]
}
return nil
@@ -378,7 +395,7 @@ func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLo
func validateImplements(schema *Schema, def *Definition, intfName string) *gqlerror.Error {
// see validation rules at the bottom of
- // https://facebook.github.io/graphql/October2021/#sec-Objects
+ // https://spec.graphql.org/October2021/#sec-Objects
intf := schema.Types[intfName]
if intf == nil {
return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(intfName))
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml b/vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml
index 7034a469..c16caabb 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml
@@ -528,7 +528,16 @@ directives:
directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
- - name: must be declared
+ - name: must be declared (type)
+ input: |
+ type User @foo {
+ name: String
+ }
+ error:
+ message: "Undefined directive foo."
+ locations: [{line: 1, column: 12}]
+
+ - name: must be declared (field)
input: |
type User {
name: String @foo
@@ -537,6 +546,15 @@ directives:
message: "Undefined directive foo."
locations: [{line: 2, column: 17}]
+ - name: must be declared (enum)
+ input: |
+ enum Unit {
+ METER @foo
+ }
+ error:
+ message: "Undefined directive foo."
+ locations: [{line: 2, column: 10}]
+
- name: cannot be self-referential
input: |
directive @A(foo: Int! @A) on FIELD_DEFINITION
@@ -604,6 +622,32 @@ directives:
type P { name: String @testField }
interface I { id: ID @testField }
+ - name: Invalid directive argument not allowed
+ input: |
+ directive @foo(bla: Int!) on FIELD_DEFINITION
+ type P {f: Int @foo(foobla: 11)}
+
+ error:
+ message: 'Undefined argument foobla for directive foo.'
+ locations: [{line: 2, column: 21}]
+
+ - name: non-null argument must be provided
+ input: |
+ directive @foo(bla: Int!) on FIELD_DEFINITION
+ type P {f: Int @foo }
+
+ error:
+ message: 'Argument bla for directive foo cannot be null.'
+ locations: [{line: 2, column: 17}]
+
+ - name: non-null argument must not be null
+ input: |
+ directive @foo(bla: Int!) on FIELD_DEFINITION
+ type P {f: Int @foo(bla: null) }
+
+ error:
+ message: 'Argument bla for directive foo cannot be null.'
+ locations: [{line: 2, column: 17}]
entry points:
- name: multiple schema entry points
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go
index 34bf93db..b4f37ce2 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go
@@ -1,6 +1,7 @@
package validator
import (
+ //nolint:revive
. "github.com/vektah/gqlparser/v2/ast"
"github.com/vektah/gqlparser/v2/gqlerror"
)
@@ -24,7 +25,15 @@ func AddRule(name string, f ruleFunc) {
func Validate(schema *Schema, doc *QueryDocument) gqlerror.List {
var errs gqlerror.List
-
+ if schema == nil {
+ errs = append(errs, gqlerror.Errorf("cannot validate as Schema is nil"))
+ }
+ if doc == nil {
+ errs = append(errs, gqlerror.Errorf("cannot validate as QueryDocument is nil"))
+ }
+ if len(errs) > 0 {
+ return errs
+ }
observers := &Events{}
for i := range rules {
rule := rules[i]
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/vars.go b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go
index 8dbb05bc..df530234 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/vars.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go
@@ -11,7 +11,7 @@ import (
"github.com/vektah/gqlparser/v2/gqlerror"
)
-var UnexpectedType = fmt.Errorf("Unexpected Type")
+var ErrUnexpectedType = fmt.Errorf("Unexpected Type")
// VariableValues coerces and validates variable values
func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables map[string]interface{}) (map[string]interface{}, error) {
@@ -53,8 +53,8 @@ func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables m
} else {
rv := reflect.ValueOf(val)
- jsonNumber, isJsonNumber := val.(json.Number)
- if isJsonNumber {
+ jsonNumber, isJSONNumber := val.(json.Number)
+ if isJSONNumber {
if v.Type.NamedType == "Int" {
n, err := jsonNumber.Int64()
if err != nil {
diff --git a/vendor/golang.org/x/net/http2/h2c/h2c.go b/vendor/golang.org/x/net/http2/h2c/h2c.go
index a72bbed1..2d6bf861 100644
--- a/vendor/golang.org/x/net/http2/h2c/h2c.go
+++ b/vendor/golang.org/x/net/http2/h2c/h2c.go
@@ -44,7 +44,7 @@ func init() {
// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
// h2c. When either of those situations occur we hijack the HTTP/1 connection,
-// convert it to a HTTP/2 connection and pass the net.Conn to http2.ServeConn.
+// convert it to an HTTP/2 connection and pass the net.Conn to http2.ServeConn.
type h2cHandler struct {
Handler http.Handler
s *http2.Server
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index cd057f39..033b6e6d 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
if s.NewWriteScheduler != nil {
sc.writeSched = s.NewWriteScheduler()
} else {
- sc.writeSched = NewPriorityWriteScheduler(nil)
+ sc.writeSched = newRoundRobinWriteScheduler()
}
// These start at the RFC-specified defaults. If there is a higher
@@ -2429,7 +2429,7 @@ type requestBody struct {
conn *serverConn
closeOnce sync.Once // for use by Close only
sawEOF bool // for use by Read only
- pipe *pipe // non-nil if we have a HTTP entity message body
+ pipe *pipe // non-nil if we have an HTTP entity message body
needsContinue bool // need to send a 100-continue
}
@@ -2569,7 +2569,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
clen = ""
}
}
- if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+ _, hasContentLength := rws.snapHeader["Content-Length"]
+ if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
@@ -2774,7 +2775,7 @@ func (w *responseWriter) FlushError() error {
err = rws.bw.Flush()
} else {
// The bufio.Writer won't call chunkWriter.Write
- // (writeChunk with zero bytes, so we have to do it
+ // (writeChunk with zero bytes), so we have to do it
// ourselves to force the HTTP response header and/or
// final DATA frame (with END_STREAM) to be sent.
_, err = chunkWriter{rws}.Write(nil)
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index ac90a263..4f08ccba 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -1268,8 +1268,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
cancelRequest := func(cs *clientStream, err error) error {
cs.cc.mu.Lock()
- defer cs.cc.mu.Unlock()
cs.abortStreamLocked(err)
+ bodyClosed := cs.reqBodyClosed
if cs.ID != 0 {
// This request may have failed because of a problem with the connection,
// or for some unrelated reason. (For example, the user might have canceled
@@ -1284,6 +1284,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// will not help.
cs.cc.doNotReuse = true
}
+ cs.cc.mu.Unlock()
+ // Wait for the request body to be closed.
+ //
+ // If nothing closed the body before now, abortStreamLocked
+ // will have started a goroutine to close it.
+ //
+ // Closing the body before returning avoids a race condition
+ // with net/http checking its readTrackingBody to see if the
+ // body was read from or closed. See golang/go#60041.
+ //
+ // The body is closed in a separate goroutine without the
+ // connection mutex held, but dropping the mutex before waiting
+ // will keep us from holding it indefinitely if the body
+ // close is slow for some reason.
+ if bodyClosed != nil {
+ <-bodyClosed
+ }
return err
}
@@ -1899,7 +1916,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
- // followed by the query production (see Sections 3.3 and 3.4 of
+ // followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index c7cd0017..cc893adc 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -184,7 +184,8 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
- s []FrameWriteRequest
+ s []FrameWriteRequest
+ prev, next *writeQueue
}
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
new file mode 100644
index 00000000..54fe8632
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -0,0 +1,119 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+)
+
+type roundRobinWriteScheduler struct {
+ // control contains control frames (SETTINGS, PING, etc.).
+ control writeQueue
+
+ // streams maps stream ID to a queue.
+ streams map[uint32]*writeQueue
+
+ // stream queues are stored in a circular linked list.
+ // head is the next stream to write, or nil if there are no streams open.
+ head *writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+// newRoundRobinWriteScheduler constructs a new write scheduler.
+// The round robin scheduler priorizes control frames
+// like SETTINGS and PING over DATA frames.
+// When there are no control frames to send, it performs a round-robin
+// selection from the ready streams.
+func newRoundRobinWriteScheduler() WriteScheduler {
+ ws := &roundRobinWriteScheduler{
+ streams: make(map[uint32]*writeQueue),
+ }
+ return ws
+}
+
+func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ if ws.streams[streamID] != nil {
+ panic(fmt.Errorf("stream %d already opened", streamID))
+ }
+ q := ws.queuePool.get()
+ ws.streams[streamID] = q
+ if ws.head == nil {
+ ws.head = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.head.prev
+ q.next = ws.head
+ q.prev.next = q
+ q.next.prev = q
+ }
+}
+
+func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) {
+ q := ws.streams[streamID]
+ if q == nil {
+ return
+ }
+ if q.next == q {
+ // This was the only open stream.
+ ws.head = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.head == q {
+ ws.head = q.next
+ }
+ }
+ delete(ws.streams, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {}
+
+func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) {
+ if wr.isControl() {
+ ws.control.push(wr)
+ return
+ }
+ q := ws.streams[wr.StreamID()]
+ if q == nil {
+ // This is a closed stream.
+ // wr should not be a HEADERS or DATA frame.
+ // We push the request onto the control queue.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ ws.control.push(wr)
+ return
+ }
+ q.push(wr)
+}
+
+func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.control.empty() {
+ return ws.control.shift(), true
+ }
+ if ws.head == nil {
+ return FrameWriteRequest{}, false
+ }
+ q := ws.head
+ for {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ ws.head = q.next
+ return wr, true
+ }
+ q = q.next
+ if q == ws.head {
+ break
+ }
+ }
+ return FrameWriteRequest{}, false
+}
diff --git a/vendor/golang.org/x/sys/cpu/endian_little.go b/vendor/golang.org/x/sys/cpu/endian_little.go
index fe545966..55db853e 100644
--- a/vendor/golang.org/x/sys/cpu/endian_little.go
+++ b/vendor/golang.org/x/sys/cpu/endian_little.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
-// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
+//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm
+// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh wasm
package cpu
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index 8e3947c3..e6f31d37 100644
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
$cmd docker build --tag generate:$GOOS $GOOS
- $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS
+ $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit
fi
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index be0423e6..31564627 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -741,7 +741,8 @@ main(void)
e = errors[i].num;
if(i > 0 && errors[i-1].num == e)
continue;
- strcpy(buf, strerror(e));
+ strncpy(buf, strerror(e), sizeof(buf) - 1);
+ buf[sizeof(buf) - 1] = '\0';
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
buf[0] += a - A;
@@ -760,7 +761,8 @@ main(void)
e = signals[i].num;
if(i > 0 && signals[i-1].num == e)
continue;
- strcpy(buf, strsignal(e));
+ strncpy(buf, strsignal(e), sizeof(buf) - 1);
+ buf[sizeof(buf) - 1] = '\0';
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
buf[0] += a - A;
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index fbaeb5ff..6de486be 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1699,12 +1699,23 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) {
return ptracePoke(PTRACE_POKEUSR, PTRACE_PEEKUSR, pid, addr, data)
}
+// elfNT_PRSTATUS is a copy of the debug/elf.NT_PRSTATUS constant so
+// x/sys/unix doesn't need to depend on debug/elf and thus
+// compress/zlib, debug/dwarf, and other packages.
+const elfNT_PRSTATUS = 1
+
func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
- return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
+ var iov Iovec
+ iov.Base = (*byte)(unsafe.Pointer(regsout))
+ iov.SetLen(int(unsafe.Sizeof(*regsout)))
+ return ptracePtr(PTRACE_GETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov))
}
func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
- return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
+ var iov Iovec
+ iov.Base = (*byte)(unsafe.Pointer(regs))
+ iov.SetLen(int(unsafe.Sizeof(*regs)))
+ return ptracePtr(PTRACE_SETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov))
}
func PtraceSetOptions(pid int, options int) (err error) {
@@ -2420,6 +2431,21 @@ func PthreadSigmask(how int, set, oldset *Sigset_t) error {
return rtSigprocmask(how, set, oldset, _C__NSIG/8)
}
+//sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int)
+//sysnb getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int)
+
+func Getresuid() (ruid, euid, suid int) {
+ var r, e, s _C_int
+ getresuid(&r, &e, &s)
+ return int(r), int(e), int(s)
+}
+
+func Getresgid() (rgid, egid, sgid int) {
+ var r, e, s _C_int
+ getresgid(&r, &e, &s)
+ return int(r), int(e), int(s)
+}
+
/*
* Unimplemented
*/
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index f9c7a966..c5f166a1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -151,6 +151,21 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
return
}
+//sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int)
+//sysnb getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int)
+
+func Getresuid() (ruid, euid, suid int) {
+ var r, e, s _C_int
+ getresuid(&r, &e, &s)
+ return int(r), int(e), int(s)
+}
+
+func Getresgid() (rgid, egid, sgid int) {
+ var r, e, s _C_int
+ getresgid(&r, &e, &s)
+ return int(r), int(e), int(s)
+}
+
//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
@@ -338,8 +353,6 @@ func Uname(uname *Utsname) error {
// getgid
// getitimer
// getlogin
-// getresgid
-// getresuid
// getthrid
// ktrace
// lfs_bmapv
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index f6192526..48984202 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -329,6 +329,54 @@ const (
SCM_WIFI_STATUS = 0x25
SFD_CLOEXEC = 0x400000
SFD_NONBLOCK = 0x4000
+ SF_FP = 0x38
+ SF_I0 = 0x20
+ SF_I1 = 0x24
+ SF_I2 = 0x28
+ SF_I3 = 0x2c
+ SF_I4 = 0x30
+ SF_I5 = 0x34
+ SF_L0 = 0x0
+ SF_L1 = 0x4
+ SF_L2 = 0x8
+ SF_L3 = 0xc
+ SF_L4 = 0x10
+ SF_L5 = 0x14
+ SF_L6 = 0x18
+ SF_L7 = 0x1c
+ SF_PC = 0x3c
+ SF_RETP = 0x40
+ SF_V9_FP = 0x70
+ SF_V9_I0 = 0x40
+ SF_V9_I1 = 0x48
+ SF_V9_I2 = 0x50
+ SF_V9_I3 = 0x58
+ SF_V9_I4 = 0x60
+ SF_V9_I5 = 0x68
+ SF_V9_L0 = 0x0
+ SF_V9_L1 = 0x8
+ SF_V9_L2 = 0x10
+ SF_V9_L3 = 0x18
+ SF_V9_L4 = 0x20
+ SF_V9_L5 = 0x28
+ SF_V9_L6 = 0x30
+ SF_V9_L7 = 0x38
+ SF_V9_PC = 0x78
+ SF_V9_RETP = 0x80
+ SF_V9_XARG0 = 0x88
+ SF_V9_XARG1 = 0x90
+ SF_V9_XARG2 = 0x98
+ SF_V9_XARG3 = 0xa0
+ SF_V9_XARG4 = 0xa8
+ SF_V9_XARG5 = 0xb0
+ SF_V9_XXARG = 0xb8
+ SF_XARG0 = 0x44
+ SF_XARG1 = 0x48
+ SF_XARG2 = 0x4c
+ SF_XARG3 = 0x50
+ SF_XARG4 = 0x54
+ SF_XARG5 = 0x58
+ SF_XXARG = 0x5c
SIOCATMARK = 0x8905
SIOCGPGRP = 0x8904
SIOCGSTAMPNS_NEW = 0x40108907
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index da63d9d7..722c29a0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -2172,3 +2172,17 @@ func rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr)
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) {
+ RawSyscallNoError(SYS_GETRESUID, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid)))
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) {
+ RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index 6699a783..9ab9abf7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) {
+ syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid)))
+ return
+}
+
+var libc_getresuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) {
+ syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
+ return
+}
+
+var libc_getresgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresgid getresgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
index 04f0de34..3dcacd30 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
@@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4
DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB)
+TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresuid(SB)
+GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB)
+
+TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresgid(SB)
+GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB)
+
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 1e775fe0..915761ea 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -519,15 +519,29 @@ var libc_getcwd_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func ioctl(fd int, req uint, arg uintptr) (err error) {
- _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
- if e1 != 0 {
- err = errnoErr(e1)
- }
+func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) {
+ syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid)))
return
}
-func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+var libc_getresuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) {
+ syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
+ return
+}
+
+var libc_getresgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresgid getresgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
err = errnoErr(e1)
@@ -541,6 +555,16 @@ var libc_ioctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+ _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
var _p0 unsafe.Pointer
if len(mib) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
index 27b6f4df..2763620b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
@@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
+TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresuid(SB)
+GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB)
+
+TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresgid(SB)
+GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB)
+
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index 7f642789..8e87fdf1 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) {
+ syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid)))
+ return
+}
+
+var libc_getresuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) {
+ syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
+ return
+}
+
+var libc_getresgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresgid getresgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
index b797045f..c9223140 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
@@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4
DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB)
+TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresuid(SB)
+GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB)
+
+TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresgid(SB)
+GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB)
+
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
index 756ef7b1..12a7a216 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) {
+ syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid)))
+ return
+}
+
+var libc_getresuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) {
+ syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
+ return
+}
+
+var libc_getresgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresgid getresgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
index a8712662..a6bc32c9 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
@@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
+TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresuid(SB)
+GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB)
+
+TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresgid(SB)
+GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB)
+
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
index 7bc2e24e..b19e8aa0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
@@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) {
+ syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid)))
+ return
+}
+
+var libc_getresuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) {
+ syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
+ return
+}
+
+var libc_getresgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresgid getresgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
index 05d4bffd..b4e7bcea 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
@@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
+TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresuid(SB)
+GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB)
+
+TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresgid(SB)
+GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB)
+
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
index 739be621..fb99594c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
@@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) {
+ syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid)))
+ return
+}
+
+var libc_getresuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) {
+ syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
+ return
+}
+
+var libc_getresgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresgid getresgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
index 74a25f8d..ca3f7660 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
@@ -189,6 +189,18 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
+TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0
+ CALL libc_getresuid(SB)
+ RET
+GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB)
+
+TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0
+ CALL libc_getresgid(SB)
+ RET
+GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB)
+
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
CALL libc_ioctl(SB)
RET
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
index 7d95a197..32cbbbc5 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
@@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) {
+ syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid)))
+ return
+}
+
+var libc_getresuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) {
+ syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
+ return
+}
+
+var libc_getresgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getresgid getresgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
index 990be245..477a7d5b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
@@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
+TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresuid(SB)
+GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB)
+
+TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getresgid(SB)
+GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8
+DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB)
+
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index ca84727c..00c3b8c2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -2555,6 +2555,11 @@ const (
BPF_REG_8 = 0x8
BPF_REG_9 = 0x9
BPF_REG_10 = 0xa
+ BPF_CGROUP_ITER_ORDER_UNSPEC = 0x0
+ BPF_CGROUP_ITER_SELF_ONLY = 0x1
+ BPF_CGROUP_ITER_DESCENDANTS_PRE = 0x2
+ BPF_CGROUP_ITER_DESCENDANTS_POST = 0x3
+ BPF_CGROUP_ITER_ANCESTORS_UP = 0x4
BPF_MAP_CREATE = 0x0
BPF_MAP_LOOKUP_ELEM = 0x1
BPF_MAP_UPDATE_ELEM = 0x2
@@ -2566,6 +2571,7 @@ const (
BPF_PROG_ATTACH = 0x8
BPF_PROG_DETACH = 0x9
BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_RUN = 0xa
BPF_PROG_GET_NEXT_ID = 0xb
BPF_MAP_GET_NEXT_ID = 0xc
BPF_PROG_GET_FD_BY_ID = 0xd
@@ -2610,6 +2616,7 @@ const (
BPF_MAP_TYPE_CPUMAP = 0x10
BPF_MAP_TYPE_XSKMAP = 0x11
BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 0x13
BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
@@ -2620,6 +2627,10 @@ const (
BPF_MAP_TYPE_STRUCT_OPS = 0x1a
BPF_MAP_TYPE_RINGBUF = 0x1b
BPF_MAP_TYPE_INODE_STORAGE = 0x1c
+ BPF_MAP_TYPE_TASK_STORAGE = 0x1d
+ BPF_MAP_TYPE_BLOOM_FILTER = 0x1e
+ BPF_MAP_TYPE_USER_RINGBUF = 0x1f
+ BPF_MAP_TYPE_CGRP_STORAGE = 0x20
BPF_PROG_TYPE_UNSPEC = 0x0
BPF_PROG_TYPE_SOCKET_FILTER = 0x1
BPF_PROG_TYPE_KPROBE = 0x2
@@ -2651,6 +2662,7 @@ const (
BPF_PROG_TYPE_EXT = 0x1c
BPF_PROG_TYPE_LSM = 0x1d
BPF_PROG_TYPE_SK_LOOKUP = 0x1e
+ BPF_PROG_TYPE_SYSCALL = 0x1f
BPF_CGROUP_INET_INGRESS = 0x0
BPF_CGROUP_INET_EGRESS = 0x1
BPF_CGROUP_INET_SOCK_CREATE = 0x2
@@ -2689,6 +2701,12 @@ const (
BPF_XDP_CPUMAP = 0x23
BPF_SK_LOOKUP = 0x24
BPF_XDP = 0x25
+ BPF_SK_SKB_VERDICT = 0x26
+ BPF_SK_REUSEPORT_SELECT = 0x27
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 0x28
+ BPF_PERF_EVENT = 0x29
+ BPF_TRACE_KPROBE_MULTI = 0x2a
+ BPF_LSM_CGROUP = 0x2b
BPF_LINK_TYPE_UNSPEC = 0x0
BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1
BPF_LINK_TYPE_TRACING = 0x2
@@ -2696,6 +2714,9 @@ const (
BPF_LINK_TYPE_ITER = 0x4
BPF_LINK_TYPE_NETNS = 0x5
BPF_LINK_TYPE_XDP = 0x6
+ BPF_LINK_TYPE_PERF_EVENT = 0x7
+ BPF_LINK_TYPE_KPROBE_MULTI = 0x8
+ BPF_LINK_TYPE_STRUCT_OPS = 0x9
BPF_ANY = 0x0
BPF_NOEXIST = 0x1
BPF_EXIST = 0x2
@@ -2733,6 +2754,7 @@ const (
BPF_F_ZERO_CSUM_TX = 0x2
BPF_F_DONT_FRAGMENT = 0x4
BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_TUNINFO_FLAGS = 0x10
BPF_F_INDEX_MASK = 0xffffffff
BPF_F_CURRENT_CPU = 0xffffffff
BPF_F_CTXLEN_MASK = 0xfffff00000000
@@ -2747,6 +2769,7 @@ const (
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20
+ BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40
BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
BPF_F_SYSCTL_BASE_NAME = 0x1
@@ -2771,10 +2794,16 @@ const (
BPF_LWT_ENCAP_SEG6 = 0x0
BPF_LWT_ENCAP_SEG6_INLINE = 0x1
BPF_LWT_ENCAP_IP = 0x2
+ BPF_F_BPRM_SECUREEXEC = 0x1
+ BPF_F_BROADCAST = 0x8
+ BPF_F_EXCLUDE_INGRESS = 0x10
+ BPF_SKB_TSTAMP_UNSPEC = 0x0
+ BPF_SKB_TSTAMP_DELIVERY_MONO = 0x1
BPF_OK = 0x0
BPF_DROP = 0x2
BPF_REDIRECT = 0x7
BPF_LWT_REROUTE = 0x80
+ BPF_FLOW_DISSECTOR_CONTINUE = 0x81
BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
@@ -2838,6 +2867,10 @@ const (
BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_MTU_CHK_SEGS = 0x1
+ BPF_MTU_CHK_RET_SUCCESS = 0x0
+ BPF_MTU_CHK_RET_FRAG_NEEDED = 0x1
+ BPF_MTU_CHK_RET_SEGS_TOOBIG = 0x2
BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
BPF_FD_TYPE_TRACEPOINT = 0x1
BPF_FD_TYPE_KPROBE = 0x2
@@ -2847,6 +2880,19 @@ const (
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1
BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2
BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4
+ BPF_CORE_FIELD_BYTE_OFFSET = 0x0
+ BPF_CORE_FIELD_BYTE_SIZE = 0x1
+ BPF_CORE_FIELD_EXISTS = 0x2
+ BPF_CORE_FIELD_SIGNED = 0x3
+ BPF_CORE_FIELD_LSHIFT_U64 = 0x4
+ BPF_CORE_FIELD_RSHIFT_U64 = 0x5
+ BPF_CORE_TYPE_ID_LOCAL = 0x6
+ BPF_CORE_TYPE_ID_TARGET = 0x7
+ BPF_CORE_TYPE_EXISTS = 0x8
+ BPF_CORE_TYPE_SIZE = 0x9
+ BPF_CORE_ENUMVAL_EXISTS = 0xa
+ BPF_CORE_ENUMVAL_VALUE = 0xb
+ BPF_CORE_TYPE_MATCHES = 0xc
)
const (
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 3723b2c2..96459007 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -405,7 +405,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW
// Process Status API (PSAPI)
-//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses
+//sys enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses
//sys EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules
//sys EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx
//sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation
@@ -1354,6 +1354,17 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) {
return syscall.EWINDOWS
}
+func EnumProcesses(processIds []uint32, bytesReturned *uint32) error {
+ // EnumProcesses syscall expects the size parameter to be in bytes, but the code generated with mksyscall uses
+ // the length of the processIds slice instead. Hence, this wrapper function is added to fix the discrepancy.
+ var p *uint32
+ if len(processIds) > 0 {
+ p = &processIds[0]
+ }
+ size := uint32(len(processIds) * 4)
+ return enumProcesses(p, size, bytesReturned)
+}
+
func Getpid() (pid int) { return int(GetCurrentProcessId()) }
func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) {
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index a81ea2c7..566dd3e3 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -3516,12 +3516,8 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u
return
}
-func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) {
- var _p0 *uint32
- if len(processIds) > 0 {
- _p0 = &processIds[0]
- }
- r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned)))
+func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
if r1 == 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index 165ede0f..03543bd4 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
// (from "version"). Select appropriate importer.
if len(data) > 0 {
switch data[0] {
- case 'i':
+ case 'v', 'c', 'd': // binary, till go1.10
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+ case 'i': // indexed, till go1.19
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
- case 'v', 'c', 'd':
- _, pkg, err := gcimporter.BImportData(fset, imports, data, path)
- return pkg, err
-
- case 'u':
+ case 'u': // unified, from go1.20
_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
return pkg, err
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 6bb7168d..e84f19df 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -625,7 +625,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
}
if pkg.PkgPath == "unsafe" {
- pkg.GoFiles = nil // ignore fake unsafe.go file
+ pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+ } else if len(pkg.CompiledGoFiles) == 0 {
+ // Work around for pre-go.1.11 versions of go list.
+ // TODO(matloob): they should be handled by the fallback.
+ // Can we delete this?
+ pkg.CompiledGoFiles = pkg.GoFiles
}
// Assume go list emits only absolute paths for Dir.
@@ -663,13 +668,6 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
response.Roots = append(response.Roots, pkg.ID)
}
- // Work around for pre-go.1.11 versions of go list.
- // TODO(matloob): they should be handled by the fallback.
- // Can we delete this?
- if len(pkg.CompiledGoFiles) == 0 {
- pkg.CompiledGoFiles = pkg.GoFiles
- }
-
// Temporary work-around for golang/go#39986. Parse filenames out of
// error messages. This happens if there are unrecoverable syntax
// errors in the source, so we can't match on a specific error message.
@@ -891,6 +889,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string {
// probably because you'd just get the TestMain.
fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)),
}
+
+ // golang/go#60456: with go1.21 and later, go list serves pgo variants, which
+ // can be costly to compute and may result in redundant processing for the
+ // caller. Disable these variants. If someone wants to add e.g. a NeedPGO
+ // mode flag, that should be a separate proposal.
+ if goVersion >= 21 {
+ fullargs = append(fullargs, "-pgo=off")
+ }
+
fullargs = append(fullargs, cfg.BuildFlags...)
fullargs = append(fullargs, "--")
fullargs = append(fullargs, words...)
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index 0f1505b8..632be722 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -308,6 +308,9 @@ type Package struct {
TypeErrors []types.Error
// GoFiles lists the absolute file paths of the package's Go source files.
+ // It may include files that should not be compiled, for example because
+ // they contain non-matching build tags, are documentary pseudo-files such as
+ // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing.
GoFiles []string
// CompiledGoFiles lists the absolute file paths of the package's source
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
deleted file mode 100644
index aa7dfacc..00000000
--- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ /dev/null
@@ -1,764 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package objectpath defines a naming scheme for types.Objects
-// (that is, named entities in Go programs) relative to their enclosing
-// package.
-//
-// Type-checker objects are canonical, so they are usually identified by
-// their address in memory (a pointer), but a pointer has meaning only
-// within one address space. By contrast, objectpath names allow the
-// identity of an object to be sent from one program to another,
-// establishing a correspondence between types.Object variables that are
-// distinct but logically equivalent.
-//
-// A single object may have multiple paths. In this example,
-//
-// type A struct{ X int }
-// type B A
-//
-// the field X has two paths due to its membership of both A and B.
-// The For(obj) function always returns one of these paths, arbitrarily
-// but consistently.
-package objectpath
-
-import (
- "fmt"
- "go/types"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/tools/internal/typeparams"
-
- _ "unsafe" // for go:linkname
-)
-
-// A Path is an opaque name that identifies a types.Object
-// relative to its package. Conceptually, the name consists of a
-// sequence of destructuring operations applied to the package scope
-// to obtain the original object.
-// The name does not include the package itself.
-type Path string
-
-// Encoding
-//
-// An object path is a textual and (with training) human-readable encoding
-// of a sequence of destructuring operators, starting from a types.Package.
-// The sequences represent a path through the package/object/type graph.
-// We classify these operators by their type:
-//
-// PO package->object Package.Scope.Lookup
-// OT object->type Object.Type
-// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
-// TO type->object Type.{At,Field,Method,Obj} [AFMO]
-//
-// All valid paths start with a package and end at an object
-// and thus may be defined by the regular language:
-//
-// objectpath = PO (OT TT* TO)*
-//
-// The concrete encoding follows directly:
-// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
-// - The only OT operator is Object.Type,
-// which we encode as '.' because dot cannot appear in an identifier.
-// - The TT operators are encoded as [EKPRUTC];
-// one of these (TypeParam) requires an integer operand,
-// which is encoded as a string of decimal digits.
-// - The TO operators are encoded as [AFMO];
-// three of these (At,Field,Method) require an integer operand,
-// which is encoded as a string of decimal digits.
-// These indices are stable across different representations
-// of the same package, even source and export data.
-// The indices used are implementation specific and may not correspond to
-// the argument to the go/types function.
-//
-// In the example below,
-//
-// package p
-//
-// type T interface {
-// f() (a string, b struct{ X int })
-// }
-//
-// field X has the path "T.UM0.RA1.F0",
-// representing the following sequence of operations:
-//
-// p.Lookup("T") T
-// .Type().Underlying().Method(0). f
-// .Type().Results().At(1) b
-// .Type().Field(0) X
-//
-// The encoding is not maximally compact---every R or P is
-// followed by an A, for example---but this simplifies the
-// encoder and decoder.
-const (
- // object->type operators
- opType = '.' // .Type() (Object)
-
- // type->type operators
- opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
- opKey = 'K' // .Key() (Map)
- opParams = 'P' // .Params() (Signature)
- opResults = 'R' // .Results() (Signature)
- opUnderlying = 'U' // .Underlying() (Named)
- opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
- opConstraint = 'C' // .Constraint() (TypeParam)
-
- // type->object operators
- opAt = 'A' // .At(i) (Tuple)
- opField = 'F' // .Field(i) (Struct)
- opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
- opObj = 'O' // .Obj() (Named, TypeParam)
-)
-
-// For is equivalent to new(Encoder).For(obj).
-//
-// It may be more efficient to reuse a single Encoder across several calls.
-func For(obj types.Object) (Path, error) {
- return new(Encoder).For(obj)
-}
-
-// An Encoder amortizes the cost of encoding the paths of multiple objects.
-// The zero value of an Encoder is ready to use.
-type Encoder struct {
- scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names()
- namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods()
-}
-
-// For returns the path to an object relative to its package,
-// or an error if the object is not accessible from the package's Scope.
-//
-// The For function guarantees to return a path only for the following objects:
-// - package-level types
-// - exported package-level non-types
-// - methods
-// - parameter and result variables
-// - struct fields
-// These objects are sufficient to define the API of their package.
-// The objects described by a package's export data are drawn from this set.
-//
-// For does not return a path for predeclared names, imported package
-// names, local names, and unexported package-level names (except
-// types).
-//
-// Example: given this definition,
-//
-// package p
-//
-// type T interface {
-// f() (a string, b struct{ X int })
-// }
-//
-// For(X) would return a path that denotes the following sequence of operations:
-//
-// p.Scope().Lookup("T") (TypeName T)
-// .Type().Underlying().Method(0). (method Func f)
-// .Type().Results().At(1) (field Var b)
-// .Type().Field(0) (field Var X)
-//
-// where p is the package (*types.Package) to which X belongs.
-func (enc *Encoder) For(obj types.Object) (Path, error) {
- pkg := obj.Pkg()
-
- // This table lists the cases of interest.
- //
- // Object Action
- // ------ ------
- // nil reject
- // builtin reject
- // pkgname reject
- // label reject
- // var
- // package-level accept
- // func param/result accept
- // local reject
- // struct field accept
- // const
- // package-level accept
- // local reject
- // func
- // package-level accept
- // init functions reject
- // concrete method accept
- // interface method accept
- // type
- // package-level accept
- // local reject
- //
- // The only accessible package-level objects are members of pkg itself.
- //
- // The cases are handled in four steps:
- //
- // 1. reject nil and builtin
- // 2. accept package-level objects
- // 3. reject obviously invalid objects
- // 4. search the API for the path to the param/result/field/method.
-
- // 1. reference to nil or builtin?
- if pkg == nil {
- return "", fmt.Errorf("predeclared %s has no path", obj)
- }
- scope := pkg.Scope()
-
- // 2. package-level object?
- if scope.Lookup(obj.Name()) == obj {
- // Only exported objects (and non-exported types) have a path.
- // Non-exported types may be referenced by other objects.
- if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
- return "", fmt.Errorf("no path for non-exported %v", obj)
- }
- return Path(obj.Name()), nil
- }
-
- // 3. Not a package-level object.
- // Reject obviously non-viable cases.
- switch obj := obj.(type) {
- case *types.TypeName:
- if _, ok := obj.Type().(*typeparams.TypeParam); !ok {
- // With the exception of type parameters, only package-level type names
- // have a path.
- return "", fmt.Errorf("no path for %v", obj)
- }
- case *types.Const, // Only package-level constants have a path.
- *types.Label, // Labels are function-local.
- *types.PkgName: // PkgNames are file-local.
- return "", fmt.Errorf("no path for %v", obj)
-
- case *types.Var:
- // Could be:
- // - a field (obj.IsField())
- // - a func parameter or result
- // - a local var.
- // Sadly there is no way to distinguish
- // a param/result from a local
- // so we must proceed to the find.
-
- case *types.Func:
- // A func, if not package-level, must be a method.
- if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
- return "", fmt.Errorf("func is not a method: %v", obj)
- }
-
- if path, ok := enc.concreteMethod(obj); ok {
- // Fast path for concrete methods that avoids looping over scope.
- return path, nil
- }
-
- default:
- panic(obj)
- }
-
- // 4. Search the API for the path to the var (field/param/result) or method.
-
- // First inspect package-level named types.
- // In the presence of path aliases, these give
- // the best paths because non-types may
- // refer to types, but not the reverse.
- empty := make([]byte, 0, 48) // initial space
- names := enc.scopeNames(scope)
- for _, name := range names {
- o := scope.Lookup(name)
- tname, ok := o.(*types.TypeName)
- if !ok {
- continue // handle non-types in second pass
- }
-
- path := append(empty, name...)
- path = append(path, opType)
-
- T := o.Type()
-
- if tname.IsAlias() {
- // type alias
- if r := find(obj, T, path, nil); r != nil {
- return Path(r), nil
- }
- } else {
- if named, _ := T.(*types.Named); named != nil {
- if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil {
- // generic named type
- return Path(r), nil
- }
- }
- // defined (named) type
- if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
- return Path(r), nil
- }
- }
- }
-
- // Then inspect everything else:
- // non-types, and declared methods of defined types.
- for _, name := range names {
- o := scope.Lookup(name)
- path := append(empty, name...)
- if _, ok := o.(*types.TypeName); !ok {
- if o.Exported() {
- // exported non-type (const, var, func)
- if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
- return Path(r), nil
- }
- }
- continue
- }
-
- // Inspect declared methods of defined types.
- if T, ok := o.Type().(*types.Named); ok {
- path = append(path, opType)
- // Note that method index here is always with respect
- // to canonical ordering of methods, regardless of how
- // they appear in the underlying type.
- for i, m := range enc.namedMethods(T) {
- path2 := appendOpArg(path, opMethod, i)
- if m == obj {
- return Path(path2), nil // found declared method
- }
- if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
- return Path(r), nil
- }
- }
- }
- }
-
- return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
-}
-
-func appendOpArg(path []byte, op byte, arg int) []byte {
- path = append(path, op)
- path = strconv.AppendInt(path, int64(arg), 10)
- return path
-}
-
-// concreteMethod returns the path for meth, which must have a non-nil receiver.
-// The second return value indicates success and may be false if the method is
-// an interface method or if it is an instantiated method.
-//
-// This function is just an optimization that avoids the general scope walking
-// approach. You are expected to fall back to the general approach if this
-// function fails.
-func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
- // Concrete methods can only be declared on package-scoped named types. For
- // that reason we can skip the expensive walk over the package scope: the
- // path will always be package -> named type -> method. We can trivially get
- // the type name from the receiver, and only have to look over the type's
- // methods to find the method index.
- //
- // Methods on generic types require special consideration, however. Consider
- // the following package:
- //
- // L1: type S[T any] struct{}
- // L2: func (recv S[A]) Foo() { recv.Bar() }
- // L3: func (recv S[B]) Bar() { }
- // L4: type Alias = S[int]
- // L5: func _[T any]() { var s S[int]; s.Foo() }
- //
- // The receivers of methods on generic types are instantiations. L2 and L3
- // instantiate S with the type-parameters A and B, which are scoped to the
- // respective methods. L4 and L5 each instantiate S with int. Each of these
- // instantiations has its own method set, full of methods (and thus objects)
- // with receivers whose types are the respective instantiations. In other
- // words, we have
- //
- // S[A].Foo, S[A].Bar
- // S[B].Foo, S[B].Bar
- // S[int].Foo, S[int].Bar
- //
- // We may thus be trying to produce object paths for any of these objects.
- //
- // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
- // and S.Bar, which are the paths that this function naturally produces.
- //
- // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
- // don't correspond to the origin methods. For S[int], this is significant.
- // The most precise object path for S[int].Foo, for example, is Alias.Foo,
- // not S.Foo. Our function, however, would produce S.Foo, which would
- // resolve to a different object.
- //
- // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
- // still the correct paths, since only the origin methods have meaningful
- // paths. But this is likely only true for trivial cases and has edge cases.
- // Since this function is only an optimization, we err on the side of giving
- // up, deferring to the slower but definitely correct algorithm. Most users
- // of objectpath will only be giving us origin methods, anyway, as referring
- // to instantiated methods is usually not useful.
-
- if typeparams.OriginMethod(meth) != meth {
- return "", false
- }
-
- recvT := meth.Type().(*types.Signature).Recv().Type()
- if ptr, ok := recvT.(*types.Pointer); ok {
- recvT = ptr.Elem()
- }
-
- named, ok := recvT.(*types.Named)
- if !ok {
- return "", false
- }
-
- if types.IsInterface(named) {
- // Named interfaces don't have to be package-scoped
- //
- // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
- // methods, too, I think.
- return "", false
- }
-
- // Preallocate space for the name, opType, opMethod, and some digits.
- name := named.Obj().Name()
- path := make([]byte, 0, len(name)+8)
- path = append(path, name...)
- path = append(path, opType)
- for i, m := range enc.namedMethods(named) {
- if m == meth {
- path = appendOpArg(path, opMethod, i)
- return Path(path), true
- }
- }
-
- // Due to golang/go#59944, go/types fails to associate the receiver with
- // certain methods on cgo types.
- //
- // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go
- // versions gopls supports.
- return "", false
- // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named)))
-}
-
-// find finds obj within type T, returning the path to it, or nil if not found.
-//
-// The seen map is used to short circuit cycles through type parameters. If
-// nil, it will be allocated as necessary.
-func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
- switch T := T.(type) {
- case *types.Basic, *types.Named:
- // Named types belonging to pkg were handled already,
- // so T must belong to another package. No path.
- return nil
- case *types.Pointer:
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Slice:
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Array:
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Chan:
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Map:
- if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
- return r
- }
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Signature:
- if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil {
- return r
- }
- if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
- return r
- }
- return find(obj, T.Results(), append(path, opResults), seen)
- case *types.Struct:
- for i := 0; i < T.NumFields(); i++ {
- fld := T.Field(i)
- path2 := appendOpArg(path, opField, i)
- if fld == obj {
- return path2 // found field var
- }
- if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
- return r
- }
- }
- return nil
- case *types.Tuple:
- for i := 0; i < T.Len(); i++ {
- v := T.At(i)
- path2 := appendOpArg(path, opAt, i)
- if v == obj {
- return path2 // found param/result var
- }
- if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
- return r
- }
- }
- return nil
- case *types.Interface:
- for i := 0; i < T.NumMethods(); i++ {
- m := T.Method(i)
- path2 := appendOpArg(path, opMethod, i)
- if m == obj {
- return path2 // found interface method
- }
- if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
- return r
- }
- }
- return nil
- case *typeparams.TypeParam:
- name := T.Obj()
- if name == obj {
- return append(path, opObj)
- }
- if seen[name] {
- return nil
- }
- if seen == nil {
- seen = make(map[*types.TypeName]bool)
- }
- seen[name] = true
- if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
- return r
- }
- return nil
- }
- panic(T)
-}
-
-func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
- for i := 0; i < list.Len(); i++ {
- tparam := list.At(i)
- path2 := appendOpArg(path, opTypeParam, i)
- if r := find(obj, tparam, path2, seen); r != nil {
- return r
- }
- }
- return nil
-}
-
-// Object returns the object denoted by path p within the package pkg.
-func Object(pkg *types.Package, p Path) (types.Object, error) {
- if p == "" {
- return nil, fmt.Errorf("empty path")
- }
-
- pathstr := string(p)
- var pkgobj, suffix string
- if dot := strings.IndexByte(pathstr, opType); dot < 0 {
- pkgobj = pathstr
- } else {
- pkgobj = pathstr[:dot]
- suffix = pathstr[dot:] // suffix starts with "."
- }
-
- obj := pkg.Scope().Lookup(pkgobj)
- if obj == nil {
- return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
- }
-
- // abstraction of *types.{Pointer,Slice,Array,Chan,Map}
- type hasElem interface {
- Elem() types.Type
- }
- // abstraction of *types.{Named,Signature}
- type hasTypeParams interface {
- TypeParams() *typeparams.TypeParamList
- }
- // abstraction of *types.{Named,TypeParam}
- type hasObj interface {
- Obj() *types.TypeName
- }
-
- // The loop state is the pair (t, obj),
- // exactly one of which is non-nil, initially obj.
- // All suffixes start with '.' (the only object->type operation),
- // followed by optional type->type operations,
- // then a type->object operation.
- // The cycle then repeats.
- var t types.Type
- for suffix != "" {
- code := suffix[0]
- suffix = suffix[1:]
-
- // Codes [AFM] have an integer operand.
- var index int
- switch code {
- case opAt, opField, opMethod, opTypeParam:
- rest := strings.TrimLeft(suffix, "0123456789")
- numerals := suffix[:len(suffix)-len(rest)]
- suffix = rest
- i, err := strconv.Atoi(numerals)
- if err != nil {
- return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
- }
- index = int(i)
- case opObj:
- // no operand
- default:
- // The suffix must end with a type->object operation.
- if suffix == "" {
- return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
- }
- }
-
- if code == opType {
- if t != nil {
- return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
- }
- t = obj.Type()
- obj = nil
- continue
- }
-
- if t == nil {
- return nil, fmt.Errorf("invalid path: code %q in object context", code)
- }
-
- // Inv: t != nil, obj == nil
-
- switch code {
- case opElem:
- hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
- }
- t = hasElem.Elem()
-
- case opKey:
- mapType, ok := t.(*types.Map)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
- }
- t = mapType.Key()
-
- case opParams:
- sig, ok := t.(*types.Signature)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
- }
- t = sig.Params()
-
- case opResults:
- sig, ok := t.(*types.Signature)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
- }
- t = sig.Results()
-
- case opUnderlying:
- named, ok := t.(*types.Named)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
- }
- t = named.Underlying()
-
- case opTypeParam:
- hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
- }
- tparams := hasTypeParams.TypeParams()
- if n := tparams.Len(); index >= n {
- return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
- }
- t = tparams.At(index)
-
- case opConstraint:
- tparam, ok := t.(*typeparams.TypeParam)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
- }
- t = tparam.Constraint()
-
- case opAt:
- tuple, ok := t.(*types.Tuple)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
- }
- if n := tuple.Len(); index >= n {
- return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
- }
- obj = tuple.At(index)
- t = nil
-
- case opField:
- structType, ok := t.(*types.Struct)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
- }
- if n := structType.NumFields(); index >= n {
- return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
- }
- obj = structType.Field(index)
- t = nil
-
- case opMethod:
- switch t := t.(type) {
- case *types.Interface:
- if index >= t.NumMethods() {
- return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
- }
- obj = t.Method(index) // Id-ordered
-
- case *types.Named:
- methods := namedMethods(t) // (unmemoized)
- if index >= len(methods) {
- return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods))
- }
- obj = methods[index] // Id-ordered
-
- default:
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
- }
- t = nil
-
- case opObj:
- hasObj, ok := t.(hasObj)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
- }
- obj = hasObj.Obj()
- t = nil
-
- default:
- return nil, fmt.Errorf("invalid path: unknown code %q", code)
- }
- }
-
- if obj.Pkg() != pkg {
- return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
- }
-
- return obj, nil // success
-}
-
-// namedMethods returns the methods of a Named type in ascending Id order.
-func namedMethods(named *types.Named) []*types.Func {
- methods := make([]*types.Func, named.NumMethods())
- for i := range methods {
- methods[i] = named.Method(i)
- }
- sort.Slice(methods, func(i, j int) bool {
- return methods[i].Id() < methods[j].Id()
- })
- return methods
-}
-
-// namedMethods is a memoization of the namedMethods function. Callers must not modify the result.
-func (enc *Encoder) namedMethods(named *types.Named) []*types.Func {
- m := enc.namedMethodsMemo
- if m == nil {
- m = make(map[*types.Named][]*types.Func)
- enc.namedMethodsMemo = m
- }
- methods, ok := m[named]
- if !ok {
- methods = namedMethods(named) // allocates and sorts
- m[named] = methods
- }
- return methods
-}
-
-// scopeNames is a memoization of scope.Names. Callers must not modify the result.
-func (enc *Encoder) scopeNames(scope *types.Scope) []string {
- m := enc.scopeNamesMemo
- if m == nil {
- m = make(map[*types.Scope][]string)
- enc.scopeNamesMemo = m
- }
- names, ok := m[scope]
- if !ok {
- names = scope.Names() // allocates and sorts
- m[scope] = names
- }
- return names
-}
diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go
new file mode 100644
index 00000000..ff2f2ecd
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go
@@ -0,0 +1,59 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tag provides the labels used for telemetry throughout gopls.
+package tag
+
+import (
+ "golang.org/x/tools/internal/event/keys"
+)
+
+var (
+ // create the label keys we use
+ Method = keys.NewString("method", "")
+ StatusCode = keys.NewString("status.code", "")
+ StatusMessage = keys.NewString("status.message", "")
+ RPCID = keys.NewString("id", "")
+ RPCDirection = keys.NewString("direction", "")
+ File = keys.NewString("file", "")
+ Directory = keys.New("directory", "")
+ URI = keys.New("URI", "")
+ Package = keys.NewString("package", "") // Package ID
+ PackagePath = keys.NewString("package_path", "")
+ Query = keys.New("query", "")
+ Snapshot = keys.NewUInt64("snapshot", "")
+ Operation = keys.NewString("operation", "")
+
+ Position = keys.New("position", "")
+ Category = keys.NewString("category", "")
+ PackageCount = keys.NewInt("packages", "")
+ Files = keys.New("files", "")
+ Port = keys.NewInt("port", "")
+ Type = keys.New("type", "")
+ HoverKind = keys.NewString("hoverkind", "")
+
+ NewServer = keys.NewString("new_server", "A new server was added")
+ EndServer = keys.NewString("end_server", "A server was shut down")
+
+ ServerID = keys.NewString("server", "The server ID an event is related to")
+ Logfile = keys.NewString("logfile", "")
+ DebugAddress = keys.NewString("debug_address", "")
+ GoplsPath = keys.NewString("gopls_path", "")
+ ClientID = keys.NewString("client_id", "")
+
+ Level = keys.NewInt("level", "The logging level")
+)
+
+var (
+ // create the stats we measure
+ Started = keys.NewInt64("started", "Count of started RPCs.")
+ ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes)
+ SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes)
+ Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds)
+)
+
+const (
+ Inbound = "in"
+ Outbound = "out"
+)
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go
deleted file mode 100644
index 30582ed6..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go
+++ /dev/null
@@ -1,852 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
-// see that file for specification of the format.
-
-package gcimporter
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "go/constant"
- "go/token"
- "go/types"
- "math"
- "math/big"
- "sort"
- "strings"
-)
-
-// If debugFormat is set, each integer and string value is preceded by a marker
-// and position information in the encoding. This mechanism permits an importer
-// to recognize immediately when it is out of sync. The importer recognizes this
-// mode automatically (i.e., it can import export data produced with debugging
-// support even if debugFormat is not set at the time of import). This mode will
-// lead to massively larger export data (by a factor of 2 to 3) and should only
-// be enabled during development and debugging.
-//
-// NOTE: This flag is the first flag to enable if importing dies because of
-// (suspected) format errors, and whenever a change is made to the format.
-const debugFormat = false // default: false
-
-// Current export format version. Increase with each format change.
-//
-// Note: The latest binary (non-indexed) export format is at version 6.
-// This exporter is still at level 4, but it doesn't matter since
-// the binary importer can handle older versions just fine.
-//
-// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
-// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
-// 4: type name objects support type aliases, uses aliasTag
-// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
-// 2: removed unused bool in ODCL export (compiler only)
-// 1: header format change (more regular), export package for _ struct fields
-// 0: Go1.7 encoding
-const exportVersion = 4
-
-// trackAllTypes enables cycle tracking for all types, not just named
-// types. The existing compiler invariants assume that unnamed types
-// that are not completely set up are not used, or else there are spurious
-// errors.
-// If disabled, only named types are tracked, possibly leading to slightly
-// less efficient encoding in rare cases. It also prevents the export of
-// some corner-case type declarations (but those are not handled correctly
-// with with the textual export format either).
-// TODO(gri) enable and remove once issues caused by it are fixed
-const trackAllTypes = false
-
-type exporter struct {
- fset *token.FileSet
- out bytes.Buffer
-
- // object -> index maps, indexed in order of serialization
- strIndex map[string]int
- pkgIndex map[*types.Package]int
- typIndex map[types.Type]int
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
-
- // debugging support
- written int // bytes written
- indent int // for trace
-}
-
-// internalError represents an error generated inside this package.
-type internalError string
-
-func (e internalError) Error() string { return "gcimporter: " + string(e) }
-
-func internalErrorf(format string, args ...interface{}) error {
- return internalError(fmt.Sprintf(format, args...))
-}
-
-// BExportData returns binary export data for pkg.
-// If no file set is provided, position info will be missing.
-func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
- if !debug {
- defer func() {
- if e := recover(); e != nil {
- if ierr, ok := e.(internalError); ok {
- err = ierr
- return
- }
- // Not an internal error; panic again.
- panic(e)
- }
- }()
- }
-
- p := exporter{
- fset: fset,
- strIndex: map[string]int{"": 0}, // empty string is mapped to 0
- pkgIndex: make(map[*types.Package]int),
- typIndex: make(map[types.Type]int),
- posInfoFormat: true, // TODO(gri) might become a flag, eventually
- }
-
- // write version info
- // The version string must start with "version %d" where %d is the version
- // number. Additional debugging information may follow after a blank; that
- // text is ignored by the importer.
- p.rawStringln(fmt.Sprintf("version %d", exportVersion))
- var debug string
- if debugFormat {
- debug = "debug"
- }
- p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
- p.bool(trackAllTypes)
- p.bool(p.posInfoFormat)
-
- // --- generic export data ---
-
- // populate type map with predeclared "known" types
- for index, typ := range predeclared() {
- p.typIndex[typ] = index
- }
- if len(p.typIndex) != len(predeclared()) {
- return nil, internalError("duplicate entries in type map?")
- }
-
- // write package data
- p.pkg(pkg, true)
- if trace {
- p.tracef("\n")
- }
-
- // write objects
- objcount := 0
- scope := pkg.Scope()
- for _, name := range scope.Names() {
- if !token.IsExported(name) {
- continue
- }
- if trace {
- p.tracef("\n")
- }
- p.obj(scope.Lookup(name))
- objcount++
- }
-
- // indicate end of list
- if trace {
- p.tracef("\n")
- }
- p.tag(endTag)
-
- // for self-verification only (redundant)
- p.int(objcount)
-
- if trace {
- p.tracef("\n")
- }
-
- // --- end of export data ---
-
- return p.out.Bytes(), nil
-}
-
-func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
- if pkg == nil {
- panic(internalError("unexpected nil pkg"))
- }
-
- // if we saw the package before, write its index (>= 0)
- if i, ok := p.pkgIndex[pkg]; ok {
- p.index('P', i)
- return
- }
-
- // otherwise, remember the package, write the package tag (< 0) and package data
- if trace {
- p.tracef("P%d = { ", len(p.pkgIndex))
- defer p.tracef("} ")
- }
- p.pkgIndex[pkg] = len(p.pkgIndex)
-
- p.tag(packageTag)
- p.string(pkg.Name())
- if emptypath {
- p.string("")
- } else {
- p.string(pkg.Path())
- }
-}
-
-func (p *exporter) obj(obj types.Object) {
- switch obj := obj.(type) {
- case *types.Const:
- p.tag(constTag)
- p.pos(obj)
- p.qualifiedName(obj)
- p.typ(obj.Type())
- p.value(obj.Val())
-
- case *types.TypeName:
- if obj.IsAlias() {
- p.tag(aliasTag)
- p.pos(obj)
- p.qualifiedName(obj)
- } else {
- p.tag(typeTag)
- }
- p.typ(obj.Type())
-
- case *types.Var:
- p.tag(varTag)
- p.pos(obj)
- p.qualifiedName(obj)
- p.typ(obj.Type())
-
- case *types.Func:
- p.tag(funcTag)
- p.pos(obj)
- p.qualifiedName(obj)
- sig := obj.Type().(*types.Signature)
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
-
- default:
- panic(internalErrorf("unexpected object %v (%T)", obj, obj))
- }
-}
-
-func (p *exporter) pos(obj types.Object) {
- if !p.posInfoFormat {
- return
- }
-
- file, line := p.fileLine(obj)
- if file == p.prevFile {
- // common case: write line delta
- // delta == 0 means different file or no line change
- delta := line - p.prevLine
- p.int(delta)
- if delta == 0 {
- p.int(-1) // -1 means no file change
- }
- } else {
- // different file
- p.int(0)
- // Encode filename as length of common prefix with previous
- // filename, followed by (possibly empty) suffix. Filenames
- // frequently share path prefixes, so this can save a lot
- // of space and make export data size less dependent on file
- // path length. The suffix is unlikely to be empty because
- // file names tend to end in ".go".
- n := commonPrefixLen(p.prevFile, file)
- p.int(n) // n >= 0
- p.string(file[n:]) // write suffix only
- p.prevFile = file
- p.int(line)
- }
- p.prevLine = line
-}
-
-func (p *exporter) fileLine(obj types.Object) (file string, line int) {
- if p.fset != nil {
- pos := p.fset.Position(obj.Pos())
- file = pos.Filename
- line = pos.Line
- }
- return
-}
-
-func commonPrefixLen(a, b string) int {
- if len(a) > len(b) {
- a, b = b, a
- }
- // len(a) <= len(b)
- i := 0
- for i < len(a) && a[i] == b[i] {
- i++
- }
- return i
-}
-
-func (p *exporter) qualifiedName(obj types.Object) {
- p.string(obj.Name())
- p.pkg(obj.Pkg(), false)
-}
-
-func (p *exporter) typ(t types.Type) {
- if t == nil {
- panic(internalError("nil type"))
- }
-
- // Possible optimization: Anonymous pointer types *T where
- // T is a named type are common. We could canonicalize all
- // such types *T to a single type PT = *T. This would lead
- // to at most one *T entry in typIndex, and all future *T's
- // would be encoded as the respective index directly. Would
- // save 1 byte (pointerTag) per *T and reduce the typIndex
- // size (at the cost of a canonicalization map). We can do
- // this later, without encoding format change.
-
- // if we saw the type before, write its index (>= 0)
- if i, ok := p.typIndex[t]; ok {
- p.index('T', i)
- return
- }
-
- // otherwise, remember the type, write the type tag (< 0) and type data
- if trackAllTypes {
- if trace {
- p.tracef("T%d = {>\n", len(p.typIndex))
- defer p.tracef("<\n} ")
- }
- p.typIndex[t] = len(p.typIndex)
- }
-
- switch t := t.(type) {
- case *types.Named:
- if !trackAllTypes {
- // if we don't track all types, track named types now
- p.typIndex[t] = len(p.typIndex)
- }
-
- p.tag(namedTag)
- p.pos(t.Obj())
- p.qualifiedName(t.Obj())
- p.typ(t.Underlying())
- if !types.IsInterface(t) {
- p.assocMethods(t)
- }
-
- case *types.Array:
- p.tag(arrayTag)
- p.int64(t.Len())
- p.typ(t.Elem())
-
- case *types.Slice:
- p.tag(sliceTag)
- p.typ(t.Elem())
-
- case *dddSlice:
- p.tag(dddTag)
- p.typ(t.elem)
-
- case *types.Struct:
- p.tag(structTag)
- p.fieldList(t)
-
- case *types.Pointer:
- p.tag(pointerTag)
- p.typ(t.Elem())
-
- case *types.Signature:
- p.tag(signatureTag)
- p.paramList(t.Params(), t.Variadic())
- p.paramList(t.Results(), false)
-
- case *types.Interface:
- p.tag(interfaceTag)
- p.iface(t)
-
- case *types.Map:
- p.tag(mapTag)
- p.typ(t.Key())
- p.typ(t.Elem())
-
- case *types.Chan:
- p.tag(chanTag)
- p.int(int(3 - t.Dir())) // hack
- p.typ(t.Elem())
-
- default:
- panic(internalErrorf("unexpected type %T: %s", t, t))
- }
-}
-
-func (p *exporter) assocMethods(named *types.Named) {
- // Sort methods (for determinism).
- var methods []*types.Func
- for i := 0; i < named.NumMethods(); i++ {
- methods = append(methods, named.Method(i))
- }
- sort.Sort(methodsByName(methods))
-
- p.int(len(methods))
-
- if trace && methods != nil {
- p.tracef("associated methods {>\n")
- }
-
- for i, m := range methods {
- if trace && i > 0 {
- p.tracef("\n")
- }
-
- p.pos(m)
- name := m.Name()
- p.string(name)
- if !exported(name) {
- p.pkg(m.Pkg(), false)
- }
-
- sig := m.Type().(*types.Signature)
- p.paramList(types.NewTuple(sig.Recv()), false)
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
- p.int(0) // dummy value for go:nointerface pragma - ignored by importer
- }
-
- if trace && methods != nil {
- p.tracef("<\n} ")
- }
-}
-
-type methodsByName []*types.Func
-
-func (x methodsByName) Len() int { return len(x) }
-func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
-
-func (p *exporter) fieldList(t *types.Struct) {
- if trace && t.NumFields() > 0 {
- p.tracef("fields {>\n")
- defer p.tracef("<\n} ")
- }
-
- p.int(t.NumFields())
- for i := 0; i < t.NumFields(); i++ {
- if trace && i > 0 {
- p.tracef("\n")
- }
- p.field(t.Field(i))
- p.string(t.Tag(i))
- }
-}
-
-func (p *exporter) field(f *types.Var) {
- if !f.IsField() {
- panic(internalError("field expected"))
- }
-
- p.pos(f)
- p.fieldName(f)
- p.typ(f.Type())
-}
-
-func (p *exporter) iface(t *types.Interface) {
- // TODO(gri): enable importer to load embedded interfaces,
- // then emit Embeddeds and ExplicitMethods separately here.
- p.int(0)
-
- n := t.NumMethods()
- if trace && n > 0 {
- p.tracef("methods {>\n")
- defer p.tracef("<\n} ")
- }
- p.int(n)
- for i := 0; i < n; i++ {
- if trace && i > 0 {
- p.tracef("\n")
- }
- p.method(t.Method(i))
- }
-}
-
-func (p *exporter) method(m *types.Func) {
- sig := m.Type().(*types.Signature)
- if sig.Recv() == nil {
- panic(internalError("method expected"))
- }
-
- p.pos(m)
- p.string(m.Name())
- if m.Name() != "_" && !token.IsExported(m.Name()) {
- p.pkg(m.Pkg(), false)
- }
-
- // interface method; no need to encode receiver.
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
-}
-
-func (p *exporter) fieldName(f *types.Var) {
- name := f.Name()
-
- if f.Anonymous() {
- // anonymous field - we distinguish between 3 cases:
- // 1) field name matches base type name and is exported
- // 2) field name matches base type name and is not exported
- // 3) field name doesn't match base type name (alias name)
- bname := basetypeName(f.Type())
- if name == bname {
- if token.IsExported(name) {
- name = "" // 1) we don't need to know the field name or package
- } else {
- name = "?" // 2) use unexported name "?" to force package export
- }
- } else {
- // 3) indicate alias and export name as is
- // (this requires an extra "@" but this is a rare case)
- p.string("@")
- }
- }
-
- p.string(name)
- if name != "" && !token.IsExported(name) {
- p.pkg(f.Pkg(), false)
- }
-}
-
-func basetypeName(typ types.Type) string {
- switch typ := deref(typ).(type) {
- case *types.Basic:
- return typ.Name()
- case *types.Named:
- return typ.Obj().Name()
- default:
- return "" // unnamed type
- }
-}
-
-func (p *exporter) paramList(params *types.Tuple, variadic bool) {
- // use negative length to indicate unnamed parameters
- // (look at the first parameter only since either all
- // names are present or all are absent)
- n := params.Len()
- if n > 0 && params.At(0).Name() == "" {
- n = -n
- }
- p.int(n)
- for i := 0; i < params.Len(); i++ {
- q := params.At(i)
- t := q.Type()
- if variadic && i == params.Len()-1 {
- t = &dddSlice{t.(*types.Slice).Elem()}
- }
- p.typ(t)
- if n > 0 {
- name := q.Name()
- p.string(name)
- if name != "_" {
- p.pkg(q.Pkg(), false)
- }
- }
- p.string("") // no compiler-specific info
- }
-}
-
-func (p *exporter) value(x constant.Value) {
- if trace {
- p.tracef("= ")
- }
-
- switch x.Kind() {
- case constant.Bool:
- tag := falseTag
- if constant.BoolVal(x) {
- tag = trueTag
- }
- p.tag(tag)
-
- case constant.Int:
- if v, exact := constant.Int64Val(x); exact {
- // common case: x fits into an int64 - use compact encoding
- p.tag(int64Tag)
- p.int64(v)
- return
- }
- // uncommon case: large x - use float encoding
- // (powers of 2 will be encoded efficiently with exponent)
- p.tag(floatTag)
- p.float(constant.ToFloat(x))
-
- case constant.Float:
- p.tag(floatTag)
- p.float(x)
-
- case constant.Complex:
- p.tag(complexTag)
- p.float(constant.Real(x))
- p.float(constant.Imag(x))
-
- case constant.String:
- p.tag(stringTag)
- p.string(constant.StringVal(x))
-
- case constant.Unknown:
- // package contains type errors
- p.tag(unknownTag)
-
- default:
- panic(internalErrorf("unexpected value %v (%T)", x, x))
- }
-}
-
-func (p *exporter) float(x constant.Value) {
- if x.Kind() != constant.Float {
- panic(internalErrorf("unexpected constant %v, want float", x))
- }
- // extract sign (there is no -0)
- sign := constant.Sign(x)
- if sign == 0 {
- // x == 0
- p.int(0)
- return
- }
- // x != 0
-
- var f big.Float
- if v, exact := constant.Float64Val(x); exact {
- // float64
- f.SetFloat64(v)
- } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
- // TODO(gri): add big.Rat accessor to constant.Value.
- r := valueToRat(num)
- f.SetRat(r.Quo(r, valueToRat(denom)))
- } else {
- // Value too large to represent as a fraction => inaccessible.
- // TODO(gri): add big.Float accessor to constant.Value.
- f.SetFloat64(math.MaxFloat64) // FIXME
- }
-
- // extract exponent such that 0.5 <= m < 1.0
- var m big.Float
- exp := f.MantExp(&m)
-
- // extract mantissa as *big.Int
- // - set exponent large enough so mant satisfies mant.IsInt()
- // - get *big.Int from mant
- m.SetMantExp(&m, int(m.MinPrec()))
- mant, acc := m.Int(nil)
- if acc != big.Exact {
- panic(internalError("internal error"))
- }
-
- p.int(sign)
- p.int(exp)
- p.string(string(mant.Bytes()))
-}
-
-func valueToRat(x constant.Value) *big.Rat {
- // Convert little-endian to big-endian.
- // I can't believe this is necessary.
- bytes := constant.Bytes(x)
- for i := 0; i < len(bytes)/2; i++ {
- bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
- }
- return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
-}
-
-func (p *exporter) bool(b bool) bool {
- if trace {
- p.tracef("[")
- defer p.tracef("= %v] ", b)
- }
-
- x := 0
- if b {
- x = 1
- }
- p.int(x)
- return b
-}
-
-// ----------------------------------------------------------------------------
-// Low-level encoders
-
-func (p *exporter) index(marker byte, index int) {
- if index < 0 {
- panic(internalError("invalid index < 0"))
- }
- if debugFormat {
- p.marker('t')
- }
- if trace {
- p.tracef("%c%d ", marker, index)
- }
- p.rawInt64(int64(index))
-}
-
-func (p *exporter) tag(tag int) {
- if tag >= 0 {
- panic(internalError("invalid tag >= 0"))
- }
- if debugFormat {
- p.marker('t')
- }
- if trace {
- p.tracef("%s ", tagString[-tag])
- }
- p.rawInt64(int64(tag))
-}
-
-func (p *exporter) int(x int) {
- p.int64(int64(x))
-}
-
-func (p *exporter) int64(x int64) {
- if debugFormat {
- p.marker('i')
- }
- if trace {
- p.tracef("%d ", x)
- }
- p.rawInt64(x)
-}
-
-func (p *exporter) string(s string) {
- if debugFormat {
- p.marker('s')
- }
- if trace {
- p.tracef("%q ", s)
- }
- // if we saw the string before, write its index (>= 0)
- // (the empty string is mapped to 0)
- if i, ok := p.strIndex[s]; ok {
- p.rawInt64(int64(i))
- return
- }
- // otherwise, remember string and write its negative length and bytes
- p.strIndex[s] = len(p.strIndex)
- p.rawInt64(-int64(len(s)))
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
-}
-
-// marker emits a marker byte and position information which makes
-// it easy for a reader to detect if it is "out of sync". Used for
-// debugFormat format only.
-func (p *exporter) marker(m byte) {
- p.rawByte(m)
- // Enable this for help tracking down the location
- // of an incorrect marker when running in debugFormat.
- if false && trace {
- p.tracef("#%d ", p.written)
- }
- p.rawInt64(int64(p.written))
-}
-
-// rawInt64 should only be used by low-level encoders.
-func (p *exporter) rawInt64(x int64) {
- var tmp [binary.MaxVarintLen64]byte
- n := binary.PutVarint(tmp[:], x)
- for i := 0; i < n; i++ {
- p.rawByte(tmp[i])
- }
-}
-
-// rawStringln should only be used to emit the initial version string.
-func (p *exporter) rawStringln(s string) {
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
- p.rawByte('\n')
-}
-
-// rawByte is the bottleneck interface to write to p.out.
-// rawByte escapes b as follows (any encoding does that
-// hides '$'):
-//
-// '$' => '|' 'S'
-// '|' => '|' '|'
-//
-// Necessary so other tools can find the end of the
-// export data by searching for "$$".
-// rawByte should only be used by low-level encoders.
-func (p *exporter) rawByte(b byte) {
- switch b {
- case '$':
- // write '$' as '|' 'S'
- b = 'S'
- fallthrough
- case '|':
- // write '|' as '|' '|'
- p.out.WriteByte('|')
- p.written++
- }
- p.out.WriteByte(b)
- p.written++
-}
-
-// tracef is like fmt.Printf but it rewrites the format string
-// to take care of indentation.
-func (p *exporter) tracef(format string, args ...interface{}) {
- if strings.ContainsAny(format, "<>\n") {
- var buf bytes.Buffer
- for i := 0; i < len(format); i++ {
- // no need to deal with runes
- ch := format[i]
- switch ch {
- case '>':
- p.indent++
- continue
- case '<':
- p.indent--
- continue
- }
- buf.WriteByte(ch)
- if ch == '\n' {
- for j := p.indent; j > 0; j-- {
- buf.WriteString(". ")
- }
- }
- }
- format = buf.String()
- }
- fmt.Printf(format, args...)
-}
-
-// Debugging support.
-// (tagString is only used when tracing is enabled)
-var tagString = [...]string{
- // Packages
- -packageTag: "package",
-
- // Types
- -namedTag: "named type",
- -arrayTag: "array",
- -sliceTag: "slice",
- -dddTag: "ddd",
- -structTag: "struct",
- -pointerTag: "pointer",
- -signatureTag: "signature",
- -interfaceTag: "interface",
- -mapTag: "map",
- -chanTag: "chan",
-
- // Values
- -falseTag: "false",
- -trueTag: "true",
- -int64Tag: "int64",
- -floatTag: "float",
- -fractionTag: "fraction",
- -complexTag: "complex",
- -stringTag: "string",
- -unknownTag: "unknown",
-
- // Type aliases
- -aliasTag: "alias",
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
index b85de014..d98b0db2 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -2,340 +2,24 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go.
+// This file contains the remaining vestiges of
+// $GOROOT/src/go/internal/gcimporter/bimport.go.
package gcimporter
import (
- "encoding/binary"
"fmt"
- "go/constant"
"go/token"
"go/types"
- "sort"
- "strconv"
- "strings"
"sync"
- "unicode"
- "unicode/utf8"
)
-type importer struct {
- imports map[string]*types.Package
- data []byte
- importpath string
- buf []byte // for reading strings
- version int // export format version
-
- // object lists
- strList []string // in order of appearance
- pathList []string // in order of appearance
- pkgList []*types.Package // in order of appearance
- typList []types.Type // in order of appearance
- interfaceList []*types.Interface // for delayed completion only
- trackAllTypes bool
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
- fake fakeFileSet
-
- // debugging support
- debugFormat bool
- read int // bytes read
-}
-
-// BImportData imports a package from the serialized package data
-// and returns the number of bytes consumed and a reference to the package.
-// If the export data version is not recognized or the format is otherwise
-// compromised, an error is returned.
-func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
- // catch panics and return them as errors
- const currentVersion = 6
- version := -1 // unknown version
- defer func() {
- if e := recover(); e != nil {
- // Return a (possibly nil or incomplete) package unchanged (see #16088).
- if version > currentVersion {
- err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
- } else {
- err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
- }
- }
- }()
-
- p := importer{
- imports: imports,
- data: data,
- importpath: path,
- version: version,
- strList: []string{""}, // empty string is mapped to 0
- pathList: []string{""}, // empty string is mapped to 0
- fake: fakeFileSet{
- fset: fset,
- files: make(map[string]*fileInfo),
- },
- }
- defer p.fake.setLines() // set lines for files in fset
-
- // read version info
- var versionstr string
- if b := p.rawByte(); b == 'c' || b == 'd' {
- // Go1.7 encoding; first byte encodes low-level
- // encoding format (compact vs debug).
- // For backward-compatibility only (avoid problems with
- // old installed packages). Newly compiled packages use
- // the extensible format string.
- // TODO(gri) Remove this support eventually; after Go1.8.
- if b == 'd' {
- p.debugFormat = true
- }
- p.trackAllTypes = p.rawByte() == 'a'
- p.posInfoFormat = p.int() != 0
- versionstr = p.string()
- if versionstr == "v1" {
- version = 0
- }
- } else {
- // Go1.8 extensible encoding
- // read version string and extract version number (ignore anything after the version number)
- versionstr = p.rawStringln(b)
- if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
- if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
- version = v
- }
- }
- }
- p.version = version
-
- // read version specific flags - extend as necessary
- switch p.version {
- // case currentVersion:
- // ...
- // fallthrough
- case currentVersion, 5, 4, 3, 2, 1:
- p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
- p.trackAllTypes = p.int() != 0
- p.posInfoFormat = p.int() != 0
- case 0:
- // Go1.7 encoding format - nothing to do here
- default:
- errorf("unknown bexport format version %d (%q)", p.version, versionstr)
- }
-
- // --- generic export data ---
-
- // populate typList with predeclared "known" types
- p.typList = append(p.typList, predeclared()...)
-
- // read package data
- pkg = p.pkg()
-
- // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
- objcount := 0
- for {
- tag := p.tagOrIndex()
- if tag == endTag {
- break
- }
- p.obj(tag)
- objcount++
- }
-
- // self-verification
- if count := p.int(); count != objcount {
- errorf("got %d objects; want %d", objcount, count)
- }
-
- // ignore compiler-specific import data
-
- // complete interfaces
- // TODO(gri) re-investigate if we still need to do this in a delayed fashion
- for _, typ := range p.interfaceList {
- typ.Complete()
- }
-
- // record all referenced packages as imports
- list := append(([]*types.Package)(nil), p.pkgList[1:]...)
- sort.Sort(byPath(list))
- pkg.SetImports(list)
-
- // package was imported completely and without errors
- pkg.MarkComplete()
-
- return p.read, pkg, nil
-}
-
func errorf(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}
-func (p *importer) pkg() *types.Package {
- // if the package was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.pkgList[i]
- }
-
- // otherwise, i is the package tag (< 0)
- if i != packageTag {
- errorf("unexpected package tag %d version %d", i, p.version)
- }
-
- // read package data
- name := p.string()
- var path string
- if p.version >= 5 {
- path = p.path()
- } else {
- path = p.string()
- }
- if p.version >= 6 {
- p.int() // package height; unused by go/types
- }
-
- // we should never see an empty package name
- if name == "" {
- errorf("empty package name in import")
- }
-
- // an empty path denotes the package we are currently importing;
- // it must be the first package we see
- if (path == "") != (len(p.pkgList) == 0) {
- errorf("package path %q for pkg index %d", path, len(p.pkgList))
- }
-
- // if the package was imported before, use that one; otherwise create a new one
- if path == "" {
- path = p.importpath
- }
- pkg := p.imports[path]
- if pkg == nil {
- pkg = types.NewPackage(path, name)
- p.imports[path] = pkg
- } else if pkg.Name() != name {
- errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
- }
- p.pkgList = append(p.pkgList, pkg)
-
- return pkg
-}
-
-// objTag returns the tag value for each object kind.
-func objTag(obj types.Object) int {
- switch obj.(type) {
- case *types.Const:
- return constTag
- case *types.TypeName:
- return typeTag
- case *types.Var:
- return varTag
- case *types.Func:
- return funcTag
- default:
- errorf("unexpected object: %v (%T)", obj, obj) // panics
- panic("unreachable")
- }
-}
-
-func sameObj(a, b types.Object) bool {
- // Because unnamed types are not canonicalized, we cannot simply compare types for
- // (pointer) identity.
- // Ideally we'd check equality of constant values as well, but this is good enough.
- return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
-}
-
-func (p *importer) declare(obj types.Object) {
- pkg := obj.Pkg()
- if alt := pkg.Scope().Insert(obj); alt != nil {
- // This can only trigger if we import a (non-type) object a second time.
- // Excluding type aliases, this cannot happen because 1) we only import a package
- // once; and b) we ignore compiler-specific export data which may contain
- // functions whose inlined function bodies refer to other functions that
- // were already imported.
- // However, type aliases require reexporting the original type, so we need
- // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
- // method importer.obj, switch case importing functions).
- // TODO(gri) review/update this comment once the gc compiler handles type aliases.
- if !sameObj(obj, alt) {
- errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
- }
- }
-}
-
-func (p *importer) obj(tag int) {
- switch tag {
- case constTag:
- pos := p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ(nil, nil)
- val := p.value()
- p.declare(types.NewConst(pos, pkg, name, typ, val))
-
- case aliasTag:
- // TODO(gri) verify type alias hookup is correct
- pos := p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ(nil, nil)
- p.declare(types.NewTypeName(pos, pkg, name, typ))
-
- case typeTag:
- p.typ(nil, nil)
-
- case varTag:
- pos := p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ(nil, nil)
- p.declare(types.NewVar(pos, pkg, name, typ))
-
- case funcTag:
- pos := p.pos()
- pkg, name := p.qualifiedName()
- params, isddd := p.paramList()
- result, _ := p.paramList()
- sig := types.NewSignature(nil, params, result, isddd)
- p.declare(types.NewFunc(pos, pkg, name, sig))
-
- default:
- errorf("unexpected object tag %d", tag)
- }
-}
-
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
-func (p *importer) pos() token.Pos {
- if !p.posInfoFormat {
- return token.NoPos
- }
-
- file := p.prevFile
- line := p.prevLine
- delta := p.int()
- line += delta
- if p.version >= 5 {
- if delta == deltaNewFile {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.path()
- line = n
- }
- }
- } else {
- if delta == 0 {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.prevFile[:n] + p.string()
- line = p.int()
- }
- }
- }
- p.prevFile = file
- p.prevLine = line
-
- return p.fake.pos(file, line, 0)
-}
-
// Synthesize a token.Pos
type fakeFileSet struct {
fset *token.FileSet
@@ -389,205 +73,6 @@ var (
fakeLinesOnce sync.Once
)
-func (p *importer) qualifiedName() (pkg *types.Package, name string) {
- name = p.string()
- pkg = p.pkg()
- return
-}
-
-func (p *importer) record(t types.Type) {
- p.typList = append(p.typList, t)
-}
-
-// A dddSlice is a types.Type representing ...T parameters.
-// It only appears for parameter types and does not escape
-// the importer.
-type dddSlice struct {
- elem types.Type
-}
-
-func (t *dddSlice) Underlying() types.Type { return t }
-func (t *dddSlice) String() string { return "..." + t.elem.String() }
-
-// parent is the package which declared the type; parent == nil means
-// the package currently imported. The parent package is needed for
-// exported struct fields and interface methods which don't contain
-// explicit package information in the export data.
-//
-// A non-nil tname is used as the "owner" of the result type; i.e.,
-// the result type is the underlying type of tname. tname is used
-// to give interface methods a named receiver type where possible.
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
- // if the type was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.typList[i]
- }
-
- // otherwise, i is the type tag (< 0)
- switch i {
- case namedTag:
- // read type object
- pos := p.pos()
- parent, name := p.qualifiedName()
- scope := parent.Scope()
- obj := scope.Lookup(name)
-
- // if the object doesn't exist yet, create and insert it
- if obj == nil {
- obj = types.NewTypeName(pos, parent, name, nil)
- scope.Insert(obj)
- }
-
- if _, ok := obj.(*types.TypeName); !ok {
- errorf("pkg = %s, name = %s => %s", parent, name, obj)
- }
-
- // associate new named type with obj if it doesn't exist yet
- t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
-
- // but record the existing type, if any
- tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
- p.record(tname)
-
- // read underlying type
- t0.SetUnderlying(p.typ(parent, t0))
-
- // interfaces don't have associated methods
- if types.IsInterface(t0) {
- return tname
- }
-
- // read associated methods
- for i := p.int(); i > 0; i-- {
- // TODO(gri) replace this with something closer to fieldName
- pos := p.pos()
- name := p.string()
- if !exported(name) {
- p.pkg()
- }
-
- recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver?
- params, isddd := p.paramList()
- result, _ := p.paramList()
- p.int() // go:nointerface pragma - discarded
-
- sig := types.NewSignature(recv.At(0), params, result, isddd)
- t0.AddMethod(types.NewFunc(pos, parent, name, sig))
- }
-
- return tname
-
- case arrayTag:
- t := new(types.Array)
- if p.trackAllTypes {
- p.record(t)
- }
-
- n := p.int64()
- *t = *types.NewArray(p.typ(parent, nil), n)
- return t
-
- case sliceTag:
- t := new(types.Slice)
- if p.trackAllTypes {
- p.record(t)
- }
-
- *t = *types.NewSlice(p.typ(parent, nil))
- return t
-
- case dddTag:
- t := new(dddSlice)
- if p.trackAllTypes {
- p.record(t)
- }
-
- t.elem = p.typ(parent, nil)
- return t
-
- case structTag:
- t := new(types.Struct)
- if p.trackAllTypes {
- p.record(t)
- }
-
- *t = *types.NewStruct(p.fieldList(parent))
- return t
-
- case pointerTag:
- t := new(types.Pointer)
- if p.trackAllTypes {
- p.record(t)
- }
-
- *t = *types.NewPointer(p.typ(parent, nil))
- return t
-
- case signatureTag:
- t := new(types.Signature)
- if p.trackAllTypes {
- p.record(t)
- }
-
- params, isddd := p.paramList()
- result, _ := p.paramList()
- *t = *types.NewSignature(nil, params, result, isddd)
- return t
-
- case interfaceTag:
- // Create a dummy entry in the type list. This is safe because we
- // cannot expect the interface type to appear in a cycle, as any
- // such cycle must contain a named type which would have been
- // first defined earlier.
- // TODO(gri) Is this still true now that we have type aliases?
- // See issue #23225.
- n := len(p.typList)
- if p.trackAllTypes {
- p.record(nil)
- }
-
- var embeddeds []types.Type
- for n := p.int(); n > 0; n-- {
- p.pos()
- embeddeds = append(embeddeds, p.typ(parent, nil))
- }
-
- t := newInterface(p.methodList(parent, tname), embeddeds)
- p.interfaceList = append(p.interfaceList, t)
- if p.trackAllTypes {
- p.typList[n] = t
- }
- return t
-
- case mapTag:
- t := new(types.Map)
- if p.trackAllTypes {
- p.record(t)
- }
-
- key := p.typ(parent, nil)
- val := p.typ(parent, nil)
- *t = *types.NewMap(key, val)
- return t
-
- case chanTag:
- t := new(types.Chan)
- if p.trackAllTypes {
- p.record(t)
- }
-
- dir := chanDir(p.int())
- val := p.typ(parent, nil)
- *t = *types.NewChan(dir, val)
- return t
-
- default:
- errorf("unexpected type tag %d", i) // panics
- panic("unreachable")
- }
-}
-
func chanDir(d int) types.ChanDir {
// tag values must match the constants in cmd/compile/internal/gc/go.go
switch d {
@@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir {
}
}
-func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
- if n := p.int(); n > 0 {
- fields = make([]*types.Var, n)
- tags = make([]string, n)
- for i := range fields {
- fields[i], tags[i] = p.field(parent)
- }
- }
- return
-}
-
-func (p *importer) field(parent *types.Package) (*types.Var, string) {
- pos := p.pos()
- pkg, name, alias := p.fieldName(parent)
- typ := p.typ(parent, nil)
- tag := p.string()
-
- anonymous := false
- if name == "" {
- // anonymous field - typ must be T or *T and T must be a type name
- switch typ := deref(typ).(type) {
- case *types.Basic: // basic types are named types
- pkg = nil // // objects defined in Universe scope have no package
- name = typ.Name()
- case *types.Named:
- name = typ.Obj().Name()
- default:
- errorf("named base type expected")
- }
- anonymous = true
- } else if alias {
- // anonymous field: we have an explicit name because it's an alias
- anonymous = true
- }
-
- return types.NewField(pos, pkg, name, typ, anonymous), tag
-}
-
-func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
- if n := p.int(); n > 0 {
- methods = make([]*types.Func, n)
- for i := range methods {
- methods[i] = p.method(parent, baseType)
- }
- }
- return
-}
-
-func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
- pos := p.pos()
- pkg, name, _ := p.fieldName(parent)
- // If we don't have a baseType, use a nil receiver.
- // A receiver using the actual interface type (which
- // we don't know yet) will be filled in when we call
- // types.Interface.Complete.
- var recv *types.Var
- if baseType != nil {
- recv = types.NewVar(token.NoPos, parent, "", baseType)
- }
- params, isddd := p.paramList()
- result, _ := p.paramList()
- sig := types.NewSignature(recv, params, result, isddd)
- return types.NewFunc(pos, pkg, name, sig)
-}
-
-func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) {
- name = p.string()
- pkg = parent
- if pkg == nil {
- // use the imported package instead
- pkg = p.pkgList[0]
- }
- if p.version == 0 && name == "_" {
- // version 0 didn't export a package for _ fields
- return
- }
- switch name {
- case "":
- // 1) field name matches base type name and is exported: nothing to do
- case "?":
- // 2) field name matches base type name and is not exported: need package
- name = ""
- pkg = p.pkg()
- case "@":
- // 3) field name doesn't match type name (alias)
- name = p.string()
- alias = true
- fallthrough
- default:
- if !exported(name) {
- pkg = p.pkg()
- }
- }
- return
-}
-
-func (p *importer) paramList() (*types.Tuple, bool) {
- n := p.int()
- if n == 0 {
- return nil, false
- }
- // negative length indicates unnamed parameters
- named := true
- if n < 0 {
- n = -n
- named = false
- }
- // n > 0
- params := make([]*types.Var, n)
- isddd := false
- for i := range params {
- params[i], isddd = p.param(named)
- }
- return types.NewTuple(params...), isddd
-}
-
-func (p *importer) param(named bool) (*types.Var, bool) {
- t := p.typ(nil, nil)
- td, isddd := t.(*dddSlice)
- if isddd {
- t = types.NewSlice(td.elem)
- }
-
- var pkg *types.Package
- var name string
- if named {
- name = p.string()
- if name == "" {
- errorf("expected named parameter")
- }
- if name != "_" {
- pkg = p.pkg()
- }
- if i := strings.Index(name, "·"); i > 0 {
- name = name[:i] // cut off gc-specific parameter numbering
- }
- }
-
- // read and discard compiler-specific info
- p.string()
-
- return types.NewVar(token.NoPos, pkg, name, t), isddd
-}
-
-func exported(name string) bool {
- ch, _ := utf8.DecodeRuneInString(name)
- return unicode.IsUpper(ch)
-}
-
-func (p *importer) value() constant.Value {
- switch tag := p.tagOrIndex(); tag {
- case falseTag:
- return constant.MakeBool(false)
- case trueTag:
- return constant.MakeBool(true)
- case int64Tag:
- return constant.MakeInt64(p.int64())
- case floatTag:
- return p.float()
- case complexTag:
- re := p.float()
- im := p.float()
- return constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
- case stringTag:
- return constant.MakeString(p.string())
- case unknownTag:
- return constant.MakeUnknown()
- default:
- errorf("unexpected value tag %d", tag) // panics
- panic("unreachable")
- }
-}
-
-func (p *importer) float() constant.Value {
- sign := p.int()
- if sign == 0 {
- return constant.MakeInt64(0)
- }
-
- exp := p.int()
- mant := []byte(p.string()) // big endian
-
- // remove leading 0's if any
- for len(mant) > 0 && mant[0] == 0 {
- mant = mant[1:]
- }
-
- // convert to little endian
- // TODO(gri) go/constant should have a more direct conversion function
- // (e.g., once it supports a big.Float based implementation)
- for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 {
- mant[i], mant[j] = mant[j], mant[i]
- }
-
- // adjust exponent (constant.MakeFromBytes creates an integer value,
- // but mant represents the mantissa bits such that 0.5 <= mant < 1.0)
- exp -= len(mant) << 3
- if len(mant) > 0 {
- for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 {
- exp++
- }
- }
-
- x := constant.MakeFromBytes(mant)
- switch {
- case exp < 0:
- d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
- x = constant.BinaryOp(x, token.QUO, d)
- case exp > 0:
- x = constant.Shift(x, token.SHL, uint(exp))
- }
-
- if sign < 0 {
- x = constant.UnaryOp(token.SUB, x, 0)
- }
- return x
-}
-
-// ----------------------------------------------------------------------------
-// Low-level decoders
-
-func (p *importer) tagOrIndex() int {
- if p.debugFormat {
- p.marker('t')
- }
-
- return int(p.rawInt64())
-}
-
-func (p *importer) int() int {
- x := p.int64()
- if int64(int(x)) != x {
- errorf("exported integer too large")
- }
- return int(x)
-}
-
-func (p *importer) int64() int64 {
- if p.debugFormat {
- p.marker('i')
- }
-
- return p.rawInt64()
-}
-
-func (p *importer) path() string {
- if p.debugFormat {
- p.marker('p')
- }
- // if the path was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.rawInt64()
- if i >= 0 {
- return p.pathList[i]
- }
- // otherwise, i is the negative path length (< 0)
- a := make([]string, -i)
- for n := range a {
- a[n] = p.string()
- }
- s := strings.Join(a, "/")
- p.pathList = append(p.pathList, s)
- return s
-}
-
-func (p *importer) string() string {
- if p.debugFormat {
- p.marker('s')
- }
- // if the string was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.rawInt64()
- if i >= 0 {
- return p.strList[i]
- }
- // otherwise, i is the negative string length (< 0)
- if n := int(-i); n <= cap(p.buf) {
- p.buf = p.buf[:n]
- } else {
- p.buf = make([]byte, n)
- }
- for i := range p.buf {
- p.buf[i] = p.rawByte()
- }
- s := string(p.buf)
- p.strList = append(p.strList, s)
- return s
-}
-
-func (p *importer) marker(want byte) {
- if got := p.rawByte(); got != want {
- errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
- }
-
- pos := p.read
- if n := int(p.rawInt64()); n != pos {
- errorf("incorrect position: got %d; want %d", n, pos)
- }
-}
-
-// rawInt64 should only be used by low-level decoders.
-func (p *importer) rawInt64() int64 {
- i, err := binary.ReadVarint(p)
- if err != nil {
- errorf("read error: %v", err)
- }
- return i
-}
-
-// rawStringln should only be used to read the initial version string.
-func (p *importer) rawStringln(b byte) string {
- p.buf = p.buf[:0]
- for b != '\n' {
- p.buf = append(p.buf, b)
- b = p.rawByte()
- }
- return string(p.buf)
-}
-
-// needed for binary.ReadVarint in rawInt64
-func (p *importer) ReadByte() (byte, error) {
- return p.rawByte(), nil
-}
-
-// byte is the bottleneck interface for reading p.data.
-// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
-// rawByte should only be used by low-level decoders.
-func (p *importer) rawByte() byte {
- b := p.data[0]
- r := 1
- if b == '|' {
- b = p.data[1]
- r = 2
- switch b {
- case 'S':
- b = '$'
- case '|':
- // nothing to do
- default:
- errorf("unexpected escape sequence in export data")
- }
- }
- p.data = p.data[r:]
- p.read += r
- return b
-
-}
-
-// ----------------------------------------------------------------------------
-// Export format
-
-// Tags. Must be < 0.
-const (
- // Objects
- packageTag = -(iota + 1)
- constTag
- typeTag
- varTag
- funcTag
- endTag
-
- // Types
- namedTag
- arrayTag
- sliceTag
- dddTag
- structTag
- pointerTag
- signatureTag
- interfaceTag
- mapTag
- chanTag
-
- // Values
- falseTag
- trueTag
- int64Tag
- floatTag
- fractionTag // not used by gc
- complexTag
- stringTag
- nilTag // only used by gc (appears in exported inlined function bodies)
- unknownTag // not used by gc (only appears in packages with errors)
-
- // Type aliases
- aliasTag
-)
-
var predeclOnce sync.Once
var predecl []types.Type // initialized lazily
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
index a973dece..b1223713 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -230,20 +230,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
- // The indexed export format starts with an 'i'; the older
- // binary export format starts with a 'c', 'd', or 'v'
- // (from "version"). Select appropriate importer.
+ // Select appropriate importer.
if len(data) > 0 {
switch data[0] {
- case 'i':
+ case 'v', 'c', 'd': // binary, till go1.10
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+ case 'i': // indexed, till go1.19
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
- case 'v', 'c', 'd':
- _, pkg, err := BImportData(fset, packages, data, id)
- return pkg, err
-
- case 'u':
+ case 'u': // unified, from go1.20
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index a0dc0b5e..3fc7989c 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -913,6 +913,17 @@ func (w *exportWriter) value(typ types.Type, v constant.Value) {
w.int64(int64(v.Kind()))
}
+ if v.Kind() == constant.Unknown {
+ // golang/go#60605: treat unknown constant values as if they have invalid type
+ //
+ // This loses some fidelity over the package type-checked from source, but that
+ // is acceptable.
+ //
+ // TODO(rfindley): we should switch on the recorded constant kind rather
+ // than the constant type
+ return
+ }
+
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
case types.IsBoolean:
w.bool(constant.BoolVal(v))
@@ -969,6 +980,16 @@ func constantToFloat(x constant.Value) *big.Float {
return &f
}
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
@@ -1178,3 +1199,12 @@ func (q *objQueue) popHead() types.Object {
q.head++
return obj
}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index be6dace1..94a5eba3 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -131,7 +131,7 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte,
} else if version > currentVersion {
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
} else {
- err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)
}
}
}()
@@ -140,11 +140,8 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte,
r := &intReader{bytes.NewReader(data), path}
if bundle {
- bundleVersion := r.uint64()
- switch bundleVersion {
- case bundleVersion:
- default:
- errorf("unknown bundle format version %d", bundleVersion)
+ if v := r.uint64(); v != bundleVersion {
+ errorf("unknown bundle format version %d", v)
}
}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 34fc783f..b977435f 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -10,6 +10,7 @@
package gcimporter
import (
+ "fmt"
"go/token"
"go/types"
"sort"
@@ -63,6 +64,14 @@ type typeInfo struct {
}
func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ if !debug {
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
+ }
+ }()
+ }
+
s := string(data)
s = s[:strings.LastIndex(s, "\n$$\n")]
input := pkgbits.NewPkgDecoder(path, s)
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index 3c0afe72..8d9fc98d 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -24,6 +24,9 @@ import (
exec "golang.org/x/sys/execabs"
"golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+ "golang.org/x/tools/internal/event/tag"
)
// An Runner will run go command invocations and serialize
@@ -53,9 +56,19 @@ func (runner *Runner) initialize() {
// 1.14: go: updating go.mod: existing contents have changed since last read
var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
+// verb is an event label for the go command verb.
+var verb = keys.NewString("verb", "go command verb")
+
+func invLabels(inv Invocation) []label.Label {
+ return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)}
+}
+
// Run is a convenience wrapper around RunRaw.
// It returns only stdout and a "friendly" error.
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
+ defer done()
+
stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
return stdout, friendly
}
@@ -63,6 +76,9 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e
// RunPiped runs the invocation serially, always waiting for any concurrent
// invocations to complete first.
func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
+ defer done()
+
_, err := runner.runPiped(ctx, inv, stdout, stderr)
return err
}
@@ -70,6 +86,8 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde
// RunRaw runs the invocation, serializing requests only if they fight over
// go.mod changes.
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
+ defer done()
// Make sure the runner is always initialized.
runner.initialize()
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 6b493525..d4f1b4e8 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -26,6 +26,7 @@ import (
"unicode/utf8"
"golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/gopathwalk"
)
@@ -543,7 +544,7 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
var fixImports = fixImportsDefault
func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
- fixes, err := getFixes(fset, f, filename, env)
+ fixes, err := getFixes(context.Background(), fset, f, filename, env)
if err != nil {
return err
}
@@ -553,7 +554,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
// It does not modify the ast.
-func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
+func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
abs, err := filepath.Abs(filename)
if err != nil {
return nil, err
@@ -607,7 +608,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
// Go look for candidates in $GOPATH, etc. We don't necessarily load
// the real exports of sibling imports, so keep assuming their contents.
- if err := addExternalCandidates(p, p.missingRefs, filename); err != nil {
+ if err := addExternalCandidates(ctx, p, p.missingRefs, filename); err != nil {
return nil, err
}
@@ -1055,7 +1056,10 @@ type scanCallback struct {
exportsLoaded func(pkg *pkg, exports []string)
}
-func addExternalCandidates(pass *pass, refs references, filename string) error {
+func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error {
+ ctx, done := event.Start(ctx, "imports.addExternalCandidates")
+ defer done()
+
var mu sync.Mutex
found := make(map[string][]pkgDistance)
callback := &scanCallback{
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index 95a88383..58e637b9 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -11,6 +11,7 @@ package imports
import (
"bufio"
"bytes"
+ "context"
"fmt"
"go/ast"
"go/format"
@@ -23,6 +24,7 @@ import (
"strings"
"golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/internal/event"
)
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
@@ -66,14 +68,17 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
-func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
+func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
+ ctx, done := event.Start(ctx, "imports.FixImports")
+ defer done()
+
fileSet := token.NewFileSet()
file, _, err := parse(fileSet, filename, src, opt)
if err != nil {
return nil, err
}
- return getFixes(fileSet, file, filename, opt.Env)
+ return getFixes(ctx, fileSet, file, filename, opt.Env)
}
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index 7d99d04c..977d2389 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -19,6 +19,7 @@ import (
"strings"
"golang.org/x/mod/module"
+ "golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/gopathwalk"
)
@@ -37,7 +38,7 @@ type ModuleResolver struct {
mains []*gocommand.ModuleJSON
mainByDir map[string]*gocommand.ModuleJSON
modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path...
- modsByDir []*gocommand.ModuleJSON // ...or Dir.
+ modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir.
// moduleCacheCache stores information about the module cache.
moduleCacheCache *dirInfoCache
@@ -123,7 +124,7 @@ func (r *ModuleResolver) init() error {
})
sort.Slice(r.modsByDir, func(i, j int) bool {
count := func(x int) int {
- return strings.Count(r.modsByDir[x].Dir, "/")
+ return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator))
}
return count(j) < count(i) // descending order
})
@@ -327,6 +328,10 @@ func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON {
// - in /vendor/ in -mod=vendor mode.
// - nested module? Dunno.
// Rumor has it that replace targets cannot contain other replace targets.
+ //
+ // Note that it is critical here that modsByDir is sorted to have deeper dirs
+ // first. This ensures that findModuleByDir finds the innermost module.
+ // See also golang/go#56291.
for _, m := range r.modsByDir {
if !strings.HasPrefix(dir, m.Dir) {
continue
@@ -424,6 +429,9 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (
}
func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error {
+ ctx, done := event.Start(ctx, "imports.ModuleResolver.scan")
+ defer done()
+
if err := r.init(); err != nil {
return err
}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
index cfba8189..b9e87c69 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/common.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -105,6 +105,26 @@ func OriginMethod(fn *types.Func) *types.Func {
}
orig := NamedTypeOrigin(named)
gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
+
+ // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In:
+ // package p
+ // type T *int
+ // func (*T) f() {}
+ // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}.
+ // Here we make them consistent by force.
+ // (The go/types bug is general, but this workaround is reached only
+ // for generic T thanks to the early return above.)
+ if gfn == nil {
+ mset := types.NewMethodSet(types.NewPointer(orig))
+ for i := 0; i < mset.Len(); i++ {
+ m := mset.At(i)
+ if m.Obj().Id() == fn.Id() {
+ gfn = m.Obj()
+ break
+ }
+ }
+ }
+
return gfn.(*types.Func)
}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index 3c53fbc6..ce7d4351 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -11,8 +11,6 @@ import (
"go/types"
"reflect"
"unsafe"
-
- "golang.org/x/tools/go/types/objectpath"
)
func SetUsesCgo(conf *types.Config) bool {
@@ -52,10 +50,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
}
var SetGoVersion = func(conf *types.Config, version string) bool { return false }
-
-// NewObjectpathEncoder returns a function closure equivalent to
-// objectpath.For but amortized for multiple (sequential) calls.
-// It is a temporary workaround, pending the approval of proposal 58668.
-//
-//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor
-func NewObjectpathFunc() func(types.Object) (objectpath.Path, error)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a6a82123..39e94c85 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# github.com/99designs/gqlgen v0.17.31
+# github.com/99designs/gqlgen v0.17.33
## explicit; go 1.18
github.com/99designs/gqlgen
github.com/99designs/gqlgen/api
@@ -33,7 +33,7 @@ github.com/KyleBanks/depth
# github.com/Masterminds/semver/v3 v3.2.1
## explicit; go 1.18
github.com/Masterminds/semver/v3
-# github.com/adhocore/gronx v1.1.2
+# github.com/adhocore/gronx v1.6.3
## explicit; go 1.13
github.com/adhocore/gronx
# github.com/agnivade/levenshtein v1.1.1
@@ -54,10 +54,10 @@ github.com/beorn7/perks/quantile
# github.com/boltdb/bolt v1.3.1
## explicit
github.com/boltdb/bolt
-# github.com/caddyserver/certmagic v0.17.2
-## explicit; go 1.18
+# github.com/caddyserver/certmagic v0.18.0
+## explicit; go 1.19
github.com/caddyserver/certmagic
-# github.com/casbin/casbin/v2 v2.69.1
+# github.com/casbin/casbin/v2 v2.71.0
## explicit; go 1.13
github.com/casbin/casbin/v2
github.com/casbin/casbin/v2/config
@@ -82,7 +82,7 @@ github.com/cpuguy83/go-md2man/v2/md2man
## explicit; go 1.18
github.com/datarhei/core-client-go/v16
github.com/datarhei/core-client-go/v16/api
-# github.com/datarhei/gosrt v0.4.1
+# github.com/datarhei/gosrt v0.5.0
## explicit; go 1.18
github.com/datarhei/gosrt
github.com/datarhei/gosrt/internal/circular
@@ -145,7 +145,7 @@ github.com/go-openapi/jsonreference/internal
# github.com/go-openapi/spec v0.20.9
## explicit; go 1.13
github.com/go-openapi/spec
-# github.com/go-openapi/swag v0.22.3
+# github.com/go-openapi/swag v0.22.4
## explicit; go 1.18
github.com/go-openapi/swag
# github.com/go-playground/locales v0.14.1
@@ -155,7 +155,7 @@ github.com/go-playground/locales/currency
# github.com/go-playground/universal-translator v0.18.1
## explicit; go 1.18
github.com/go-playground/universal-translator
-# github.com/go-playground/validator/v10 v10.14.0
+# github.com/go-playground/validator/v10 v10.14.1
## explicit; go 1.18
github.com/go-playground/validator/v10
# github.com/gobwas/glob v0.2.3
@@ -195,7 +195,7 @@ github.com/hashicorp/go-msgpack/codec
# github.com/hashicorp/golang-lru v0.5.4
## explicit; go 1.12
github.com/hashicorp/golang-lru/simplelru
-# github.com/hashicorp/golang-lru/v2 v2.0.2
+# github.com/hashicorp/golang-lru/v2 v2.0.3
## explicit; go 1.18
github.com/hashicorp/golang-lru/v2
github.com/hashicorp/golang-lru/v2/simplelru
@@ -221,10 +221,10 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.16.5
+# github.com/klauspost/compress v1.16.6
## explicit; go 1.18
github.com/klauspost/compress/s2
-# github.com/klauspost/cpuid/v2 v2.2.4
+# github.com/klauspost/cpuid/v2 v2.2.5
## explicit; go 1.15
github.com/klauspost/cpuid/v2
# github.com/labstack/echo/v4 v4.10.2
@@ -277,7 +277,7 @@ github.com/miekg/dns
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.55
+# github.com/minio/minio-go/v7 v7.0.57
## explicit; go 1.17
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/credentials
@@ -314,7 +314,7 @@ github.com/power-devops/perfstat
# github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
## explicit
github.com/prep/average
-# github.com/prometheus/client_golang v1.15.1
+# github.com/prometheus/client_golang v1.16.0
## explicit; go 1.17
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
@@ -338,7 +338,7 @@ github.com/rs/xid
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2
-# github.com/shirou/gopsutil/v3 v3.23.4
+# github.com/shirou/gopsutil/v3 v3.23.5
## explicit; go 1.15
github.com/shirou/gopsutil/v3/cpu
github.com/shirou/gopsutil/v3/disk
@@ -349,7 +349,7 @@ github.com/shirou/gopsutil/v3/process
# github.com/shoenig/go-m1cpu v0.1.6
## explicit; go 1.20
github.com/shoenig/go-m1cpu
-# github.com/sirupsen/logrus v1.9.2
+# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
github.com/sirupsen/logrus
# github.com/stretchr/testify v1.8.4
@@ -368,10 +368,10 @@ github.com/swaggo/swag
# github.com/tklauser/go-sysconf v0.3.11
## explicit; go 1.13
github.com/tklauser/go-sysconf
-# github.com/tklauser/numcpus v0.6.0
+# github.com/tklauser/numcpus v0.6.1
## explicit; go 1.13
github.com/tklauser/numcpus
-# github.com/urfave/cli/v2 v2.24.4
+# github.com/urfave/cli/v2 v2.25.5
## explicit; go 1.18
github.com/urfave/cli/v2
# github.com/valyala/bytebufferpool v1.0.0
@@ -380,7 +380,7 @@ github.com/valyala/bytebufferpool
# github.com/valyala/fasttemplate v1.2.2
## explicit; go 1.12
github.com/valyala/fasttemplate
-# github.com/vektah/gqlparser/v2 v2.5.1
+# github.com/vektah/gqlparser/v2 v2.5.3
## explicit; go 1.16
github.com/vektah/gqlparser/v2
github.com/vektah/gqlparser/v2/ast
@@ -429,7 +429,7 @@ go.uber.org/zap/internal/bufferpool
go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.9.0
+# golang.org/x/crypto v0.10.0
## explicit; go 1.17
golang.org/x/crypto/acme
golang.org/x/crypto/acme/autocert
@@ -440,12 +440,12 @@ golang.org/x/crypto/cryptobyte/asn1
golang.org/x/crypto/ocsp
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/sha3
-# golang.org/x/mod v0.10.0
+# golang.org/x/mod v0.11.0
## explicit; go 1.17
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.10.0
+# golang.org/x/net v0.11.0
## explicit; go 1.17
golang.org/x/net/bpf
golang.org/x/net/html
@@ -460,7 +460,7 @@ golang.org/x/net/internal/socket
golang.org/x/net/ipv4
golang.org/x/net/ipv6
golang.org/x/net/publicsuffix
-# golang.org/x/sys v0.8.0
+# golang.org/x/sys v0.9.0
## explicit; go 1.17
golang.org/x/sys/cpu
golang.org/x/sys/execabs
@@ -468,7 +468,7 @@ golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/text v0.9.0
+# golang.org/x/text v0.10.0
## explicit; go 1.17
golang.org/x/text/cases
golang.org/x/text/internal
@@ -483,7 +483,7 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.3.0
## explicit
golang.org/x/time/rate
-# golang.org/x/tools v0.9.1
+# golang.org/x/tools v0.10.0
## explicit; go 1.18
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/buildutil
@@ -492,12 +492,12 @@ golang.org/x/tools/go/internal/cgo
golang.org/x/tools/go/internal/packagesdriver
golang.org/x/tools/go/loader
golang.org/x/tools/go/packages
-golang.org/x/tools/go/types/objectpath
golang.org/x/tools/imports
golang.org/x/tools/internal/event
golang.org/x/tools/internal/event/core
golang.org/x/tools/internal/event/keys
golang.org/x/tools/internal/event/label
+golang.org/x/tools/internal/event/tag
golang.org/x/tools/internal/fastwalk
golang.org/x/tools/internal/gcimporter
golang.org/x/tools/internal/gocommand