Update dependencies

This commit is contained in:
Ingo Oppermann 2023-06-16 13:30:56 +02:00
parent e366ff8626
commit 0a5661f7ab
No known key found for this signature in database
GPG Key ID: 2AB32426E9DD229E
208 changed files with 4135 additions and 6024 deletions

46
go.mod
View File

@ -3,17 +3,17 @@ module github.com/datarhei/core/v16
go 1.18
require (
github.com/99designs/gqlgen v0.17.31
github.com/99designs/gqlgen v0.17.33
github.com/Masterminds/semver/v3 v3.2.1
github.com/adhocore/gronx v1.1.2
github.com/adhocore/gronx v1.6.3
github.com/atrox/haikunatorgo/v2 v2.0.1
github.com/caddyserver/certmagic v0.17.2
github.com/casbin/casbin/v2 v2.69.1
github.com/caddyserver/certmagic v0.18.0
github.com/casbin/casbin/v2 v2.71.0
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614141756-a25a5fc3c60e
github.com/datarhei/gosrt v0.4.1
github.com/datarhei/gosrt v0.5.0
github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a
github.com/fujiwara/shapeio v1.0.0
github.com/go-playground/validator/v10 v10.14.0
github.com/go-playground/validator/v10 v10.14.1
github.com/gobwas/glob v0.2.3
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/uuid v1.3.0
@ -26,19 +26,19 @@ require (
github.com/lestrrat-go/strftime v1.0.6
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/mattn/go-isatty v0.0.19
github.com/minio/minio-go/v7 v7.0.55
github.com/minio/minio-go/v7 v7.0.57
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
github.com/prometheus/client_golang v1.15.1
github.com/shirou/gopsutil/v3 v3.23.4
github.com/prometheus/client_golang v1.16.0
github.com/shirou/gopsutil/v3 v3.23.5
github.com/stretchr/testify v1.8.4
github.com/swaggo/echo-swagger v1.4.0
github.com/swaggo/swag v1.16.1
github.com/vektah/gqlparser/v2 v2.5.1
github.com/vektah/gqlparser/v2 v2.5.3
github.com/xeipuuv/gojsonschema v1.2.0
go.etcd.io/bbolt v1.3.7
go.uber.org/automaxprocs v1.5.2
go.uber.org/zap v1.24.0
golang.org/x/mod v0.10.0
golang.org/x/mod v0.11.0
)
//replace github.com/datarhei/core-client-go/v16 => ../core-client-go
@ -61,7 +61,7 @@ require (
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
@ -70,12 +70,12 @@ require (
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.3 // indirect
github.com/iancoleman/orderedmap v0.2.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/klauspost/compress v1.16.6 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/labstack/gommon v0.4.0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/libdns/libdns v0.2.1 // indirect
@ -99,11 +99,11 @@ require (
github.com/rs/xid v1.5.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.2 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/swaggo/files/v2 v2.0.0 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/urfave/cli/v2 v2.24.4 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/urfave/cli/v2 v2.25.5 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
@ -113,12 +113,12 @@ require (
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.1.12 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/crypto v0.10.0 // indirect
golang.org/x/net v0.11.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.9.1 // indirect
golang.org/x/tools v0.10.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

117
go.sum
View File

@ -1,5 +1,5 @@
github.com/99designs/gqlgen v0.17.31 h1:VncSQ82VxieHkea8tz11p7h/zSbvHSxSDZfywqWt158=
github.com/99designs/gqlgen v0.17.31/go.mod h1:i4rEatMrzzu6RXaHydq1nmEPZkb3bKQsnxNRHS4DQB4=
github.com/99designs/gqlgen v0.17.33 h1:VTUpAtElDszatPSe26N0SD0deJCSxb7TZLlUb6JnVRY=
github.com/99designs/gqlgen v0.17.33/go.mod h1:ygDK+m8zGpoQuSh8xoq80UfisR5JTZr7mN57qXlSIZs=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw=
@ -8,11 +8,8 @@ github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/adhocore/gronx v1.1.2 h1:Hgm+d8SyGtn+rCoDkxZq3nLNFLLkzRGR7L2ziRRD1w8=
github.com/adhocore/gronx v1.1.2/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/adhocore/gronx v1.6.3 h1:bnm5vieTrY3QQPpsfB0hrAaeaHDpuZTUC2LLCVMLe9c=
github.com/adhocore/gronx v1.6.3/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -29,7 +26,6 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+
github.com/atrox/haikunatorgo/v2 v2.0.1 h1:FCVx2KL2YvZtI1rI9WeEHxeLRrKGr0Dd4wfCJiUXupc=
github.com/atrox/haikunatorgo/v2 v2.0.1/go.mod h1:BBQmx2o+1Z5poziaHRgddAZKOpijwfKdAmMnSYlFK70=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4=
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c/go.mod h1:x1vxHcL/9AVzuk5HOloOEPrtJY0MaalYr78afXZ+pWI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -38,10 +34,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/caddyserver/certmagic v0.17.2 h1:o30seC1T/dBqBCNNGNHWwj2i5/I/FMjBbTAhjADP3nE=
github.com/caddyserver/certmagic v0.17.2/go.mod h1:ouWUuC490GOLJzkyN35eXfV8bSbwMwSf4bdhkIxtdQE=
github.com/casbin/casbin/v2 v2.69.1 h1:R3e7uveIRN5Pdqvq0GXEhXmn7HyfoEVjp21/mgEXbdI=
github.com/casbin/casbin/v2 v2.69.1/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
github.com/caddyserver/certmagic v0.18.0 h1:L22mJES1WllfLoHUcQUy4wVO7UfOsoL5wtg/Bj7kmIw=
github.com/caddyserver/certmagic v0.18.0/go.mod h1:e0YLTnXIopZ05bBWCLzpIf1Yvk27Q90FGUmGowFRDY8=
github.com/casbin/casbin/v2 v2.71.0 h1:pVzHKXkGgOXIjksEwnrOjNu5CE4xy6aAVzdR8td2gSc=
github.com/casbin/casbin/v2 v2.71.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@ -50,14 +46,10 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230605095314-42546fbbbece h1:Gv+W986jLcMa/TOKg5YF3RMDlNDDyj7uHuH+mHP7xq8=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230605095314-42546fbbbece/go.mod h1:6L0zr/NUwvaPsCTK/IL17m8JUEtgLp3BDtlsBREwacg=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614130211-fb0f92af8ac9 h1:ntM1tymajXx92ydwi6RSiDG54aQV3cMOtlGRBT6p9Z8=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614130211-fb0f92af8ac9/go.mod h1:6L0zr/NUwvaPsCTK/IL17m8JUEtgLp3BDtlsBREwacg=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614141756-a25a5fc3c60e h1:iQKqGTyIdCyO7kY/G5MCKhzt3xZ5YPRubbJskVp5EvQ=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230614141756-a25a5fc3c60e/go.mod h1:6L0zr/NUwvaPsCTK/IL17m8JUEtgLp3BDtlsBREwacg=
github.com/datarhei/gosrt v0.4.1 h1:08km3wKy72jOdC+JzBDWN57H7xST4mz5lFeJQHuWmMs=
github.com/datarhei/gosrt v0.4.1/go.mod h1:FtsulRiUc67Oi3Ii9JH9aQkpO+ZfgeauRAtIE40mIVA=
github.com/datarhei/gosrt v0.5.0 h1:MhM8kb00nbWc+haNKU7ZdYgSm9pLdxdtas7tcERh8j8=
github.com/datarhei/gosrt v0.5.0/go.mod h1:bcLf0p0FeZl+QY87b+Q8nGkyjyX6IDvI9y9raol8vng=
github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a h1:Tf4DSHY1xruBglr+yYP5Wct7czM86GKMYgbXH8a7OFo=
github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -79,7 +71,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@ -93,15 +84,16 @@ github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/
github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
@ -144,8 +136,8 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU=
github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/golang-lru/v2 v2.0.3 h1:kmRrRLlInXvng0SmLxmQpQkpbYAvcXm7NPDrgxJa9mE=
github.com/hashicorp/golang-lru/v2 v2.0.3/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
github.com/hashicorp/raft v1.5.0 h1:uNs9EfJ4FwiArZRxxfd/dQ5d33nV31/CdCHArH89hT8=
github.com/hashicorp/raft v1.5.0/go.mod h1:pKHB2mf/Y25u3AHNSXVRv+yT+WAnmeTX0BwVppVQV+M=
@ -162,18 +154,16 @@ github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -224,8 +214,8 @@ github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.55 h1:ZXqUO/8cgfHzI+08h/zGuTTFpISSA32BZmBE3FCLJas=
github.com/minio/minio-go/v7 v7.0.55/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
github.com/minio/minio-go/v7 v7.0.57 h1:xsFiOiWjpC1XAGbFEUOzj1/gMXGz7ljfxifwcb/5YXU=
github.com/minio/minio-go/v7 v7.0.57/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@ -238,7 +228,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@ -259,8 +248,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -282,20 +271,18 @@ github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil/v3 v3.23.4 h1:hZwmDxZs7Ewt75DV81r4pFMqbq+di2cbt9FsQBqLD2o=
github.com/shirou/gopsutil/v3 v3.23.4/go.mod h1:ZcGxyfzAMRevhUR2+cfhXDH6gQdFYE/t8j1nsU4mPI8=
github.com/shoenig/go-m1cpu v0.1.5/go.mod h1:Wwvst4LR89UxjeFtLRMrpgRiyY4xPsejnVZym39dbAQ=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/shirou/gopsutil/v3 v3.23.5 h1:5SgDCeQ0KW0S4N0znjeM/eFHXXOKyv2dVNgRq/c9P6Y=
github.com/shirou/gopsutil/v3 v3.23.5/go.mod h1:Ng3Maa27Q2KARVJ0SPZF5NdrQSC3XHKP8IIWrHgMeLY=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.3/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@ -311,6 +298,7 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/swaggo/echo-swagger v1.4.0 h1:RCxLKySw1SceHLqnmc41pKyiIeE+OiD7NSI7FUOBlLo=
@ -321,18 +309,19 @@ github.com/swaggo/swag v1.16.1 h1:fTNRhKstPKxcnoKsytm4sahr8FaYzUcT7i1/3nd/fBg=
github.com/swaggo/swag v1.16.1/go.mod h1:9/LMvHycG3NFHfR6LwvikHv5iFvmPADQ359cKikGxto=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/urfave/cli/v2 v2.24.4 h1:0gyJJEBYtCV87zI/x2nZCPyDxD51K6xM8SkwjHFCNEU=
github.com/urfave/cli/v2 v2.24.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/urfave/cli/v2 v2.25.5 h1:d0NIAyhh5shGscroL7ek/Ya9QYQE0KNabJgiUinIQkc=
github.com/urfave/cli/v2 v2.25.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vektah/gqlparser/v2 v2.5.1 h1:ZGu+bquAY23jsxDRcYpWjttRZrUz07LbiY77gUOHcr4=
github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs=
github.com/vektah/gqlparser/v2 v2.5.3 h1:goUwv4+blhtwR3GwefadPVI4ubYc/WZSypljWMQa6IE=
github.com/vektah/gqlparser/v2 v2.5.3/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@ -343,7 +332,6 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@ -362,12 +350,12 @@ go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -375,14 +363,14 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -401,19 +389,19 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -422,8 +410,8 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg=
golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -445,7 +433,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

File diff suppressed because it is too large Load Diff

View File

@ -58,24 +58,24 @@ type AboutVersion struct {
type Metric struct {
Name string `json:"name"`
Labels map[string]interface{} `json:"labels"`
Labels map[string]interface{} `json:"labels,omitempty"`
Values []*scalars.MetricsResponseValue `json:"values"`
}
type MetricInput struct {
Name string `json:"name"`
Labels map[string]interface{} `json:"labels"`
Labels map[string]interface{} `json:"labels,omitempty"`
}
type Metrics struct {
TimerangeSeconds *int `json:"timerange_seconds"`
IntervalSeconds *int `json:"interval_seconds"`
TimerangeSeconds *int `json:"timerange_seconds,omitempty"`
IntervalSeconds *int `json:"interval_seconds,omitempty"`
Metrics []*Metric `json:"metrics"`
}
type MetricsInput struct {
TimerangeSeconds *int `json:"timerange_seconds"`
IntervalSeconds *int `json:"interval_seconds"`
TimerangeSeconds *int `json:"timerange_seconds,omitempty"`
IntervalSeconds *int `json:"interval_seconds,omitempty"`
Metrics []*MetricInput `json:"metrics"`
}
@ -113,7 +113,7 @@ type Process struct {
Config *ProcessConfig `json:"config"`
State *ProcessState `json:"state"`
Report *ProcessReport `json:"report"`
Metadata map[string]interface{} `json:"metadata"`
Metadata map[string]interface{} `json:"metadata,omitempty"`
}
type ProcessConfig struct {
@ -257,7 +257,7 @@ type ProgressIo struct {
Sampling scalars.Uint64 `json:"sampling"`
Layout string `json:"layout"`
Channels scalars.Uint64 `json:"channels"`
Avstream *AVStream `json:"avstream"`
Avstream *AVStream `json:"avstream,omitempty"`
}
type RawAVstream struct {
@ -272,7 +272,7 @@ type RawAVstream struct {
Looping bool `json:"looping"`
Duplicating bool `json:"duplicating"`
Gop string `json:"gop"`
Debug interface{} `json:"debug"`
Debug interface{} `json:"debug,omitempty"`
Input *RawAVstreamIo `json:"input"`
Output *RawAVstreamIo `json:"output"`
Swap *RawAVstreamSwap `json:"swap"`
@ -341,17 +341,17 @@ type State string
const (
StateRunning State = "RUNNING"
StateIDLe State = "IDLE"
StateIdle State = "IDLE"
)
var AllState = []State{
StateRunning,
StateIDLe,
StateIdle,
}
func (e State) IsValid() bool {
switch e {
case StateRunning, StateIDLe:
case StateRunning, StateIdle:
return true
}
return false

View File

@ -2,6 +2,7 @@ package resolver
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.33
import (
"context"

View File

@ -2,6 +2,7 @@ package resolver
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.33
import (
"context"

View File

@ -2,6 +2,7 @@ package resolver
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.33
import (
"context"

View File

@ -2,6 +2,7 @@ package resolver
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.33
import (
"context"

View File

@ -2,6 +2,7 @@ package resolver
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.33
import (
"context"

View File

@ -2,6 +2,7 @@ package resolver
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.33
import (
"context"

View File

@ -50,7 +50,7 @@ func (u *User) validate() error {
return fmt.Errorf("a name is required")
}
chars := `A-Za-z0-9:_-`
chars := `A-Za-z0-9._-`
re := regexp.MustCompile(`[^` + chars + `]`)
if re.MatchString(u.Name) {

View File

@ -196,13 +196,13 @@ func (s *server) Channels() []string {
return channels
}
func (s *server) log(who, what, action, path, message string, client net.Addr) {
func (s *server) log(who, handler, action, resource, message string, client net.Addr) {
s.logger.Info().WithFields(log.Fields{
"who": who,
"what": what,
"action": action,
"path": path,
"client": client.String(),
"who": who,
"handler": handler,
"action": action,
"resource": resource,
"client": client.String(),
}).Log(message)
}
@ -258,7 +258,7 @@ func (s *server) handlePlay(conn *rtmp.Conn) {
identity, err := s.findIdentityFromStreamKey(token)
if err != nil {
s.logger.Debug().WithError(err).Log("invalid streamkey")
s.log(identity, "PLAY", "FORBIDDEN", playpath, "invalid streamkey ("+token+")", remote)
s.log("", "PLAY", "FORBIDDEN", playpath, "invalid streamkey ("+token+")", remote)
return
}
@ -476,13 +476,13 @@ func (s *server) findIdentityFromStreamKey(key string) (string, error) {
var token string
elements := strings.Split(key, ":")
if len(elements) == 1 {
before, after, found := strings.Cut(key, ":")
if !found {
identity = s.iam.GetDefaultVerifier()
token = elements[0]
token = before
} else {
identity, err = s.iam.GetVerifier(elements[0])
token = elements[1]
identity, err = s.iam.GetVerifier(before)
token = after
}
if err != nil {
@ -490,7 +490,13 @@ func (s *server) findIdentityFromStreamKey(key string) (string, error) {
}
if ok, err := identity.VerifyServiceToken(token); !ok {
return "$anon", fmt.Errorf("invalid token: %w", err)
if err != nil {
err = fmt.Errorf("invalid token: %w", err)
} else {
err = fmt.Errorf("invalid token")
}
return "$anon", err
}
return identity.Name(), nil

View File

@ -268,10 +268,11 @@ func (s *server) srtlogListener(ctx context.Context) {
}
}
func (s *server) log(handler, action, resource, message string, client net.Addr) {
func (s *server) log(who, handler, action, resource, message string, client net.Addr) {
s.logger.Info().WithFields(log.Fields{
"who": who,
"handler": handler,
"status": action,
"action": action,
"resource": resource,
"client": client.String(),
}).Log(message)
@ -282,14 +283,19 @@ func (s *server) handleConnect(req srt.ConnRequest) srt.ConnType {
client := req.RemoteAddr()
streamId := req.StreamId()
if req.Version() != 5 {
s.log("", "CONNECT", "INVALID", streamId, "unsupported version", client)
return srt.REJECT
}
si, err := url.ParseStreamId(streamId)
if err != nil {
s.log("CONNECT", "INVALID", "", err.Error(), client)
s.log("", "CONNECT", "INVALID", streamId, err.Error(), client)
return srt.REJECT
}
if len(si.Resource) == 0 {
s.log("CONNECT", "INVALID", "", "stream resource not provided", client)
s.log("", "CONNECT", "INVALID", streamId, "stream resource not provided", client)
return srt.REJECT
}
@ -298,23 +304,23 @@ func (s *server) handleConnect(req srt.ConnRequest) srt.ConnType {
} else if si.Mode == "request" {
mode = srt.SUBSCRIBE
} else {
s.log("CONNECT", "INVALID", si.Resource, "invalid connection mode", client)
s.log("", "CONNECT", "INVALID", si.Resource, "invalid connection mode", client)
return srt.REJECT
}
if len(s.passphrase) != 0 {
if !req.IsEncrypted() {
s.log("CONNECT", "FORBIDDEN", si.Resource, "connection has to be encrypted", client)
s.log("", "CONNECT", "FORBIDDEN", si.Resource, "connection has to be encrypted", client)
return srt.REJECT
}
if err := req.SetPassphrase(s.passphrase); err != nil {
s.log("CONNECT", "FORBIDDEN", si.Resource, err.Error(), client)
s.log("", "CONNECT", "FORBIDDEN", si.Resource, err.Error(), client)
return srt.REJECT
}
} else {
if req.IsEncrypted() {
s.log("CONNECT", "INVALID", si.Resource, "connection must not be encrypted", client)
s.log("", "CONNECT", "INVALID", si.Resource, "connection must not be encrypted", client)
return srt.REJECT
}
}
@ -322,7 +328,7 @@ func (s *server) handleConnect(req srt.ConnRequest) srt.ConnType {
identity, err := s.findIdentityFromToken(si.Token)
if err != nil {
s.logger.Debug().WithError(err).Log("invalid token")
s.log("CONNECT", "FORBIDDEN", si.Resource, "invalid token", client)
s.log(identity, "CONNECT", "FORBIDDEN", si.Resource, "invalid token", client)
return srt.REJECT
}
@ -334,7 +340,7 @@ func (s *server) handleConnect(req srt.ConnRequest) srt.ConnType {
}
if !s.iam.Enforce(identity, domain, resource, action) {
s.log("CONNECT", "FORBIDDEN", si.Resource, "access denied", client)
s.log(identity, "CONNECT", "FORBIDDEN", si.Resource, "access denied", client)
return srt.REJECT
}
@ -350,6 +356,7 @@ func (s *server) publish(conn srt.Conn, isProxy bool) error {
client := conn.RemoteAddr()
si, _ := url.ParseStreamId(streamId)
identity, _ := s.findIdentityFromToken(si.Token)
// Look for the stream
s.lock.Lock()
@ -363,15 +370,15 @@ func (s *server) publish(conn srt.Conn, isProxy bool) error {
s.lock.Unlock()
if ch == nil {
s.log("PUBLISH", "CONFLICT", si.Resource, "already publishing", client)
s.log(identity, "PUBLISH", "CONFLICT", si.Resource, "already publishing", client)
conn.Close()
return fmt.Errorf("already publishing this resource")
}
s.log("PUBLISH", "START", si.Resource, "", client)
s.log(identity, "PUBLISH", "START", si.Resource, "", client)
// Blocks until connection closes
ch.pubsub.Publish(conn)
err := ch.pubsub.Publish(conn)
s.lock.Lock()
delete(s.channels, si.Resource)
@ -379,7 +386,7 @@ func (s *server) publish(conn srt.Conn, isProxy bool) error {
ch.Close()
s.log("PUBLISH", "STOP", si.Resource, "", client)
s.log(identity, "PUBLISH", "STOP", si.Resource, err.Error(), client)
conn.Close()
@ -393,6 +400,7 @@ func (s *server) handleSubscribe(conn srt.Conn) {
client := conn.RemoteAddr()
si, _ := url.ParseStreamId(streamId)
identity, _ := s.findIdentityFromToken(si.Token)
// Look for the stream locally
s.lock.RLock()
@ -403,7 +411,7 @@ func (s *server) handleSubscribe(conn srt.Conn) {
// Check in the cluster for the stream and proxy it
srturl, err := s.proxy.GetURL("srt", si.Resource)
if err != nil {
s.log("SUBSCRIBE", "NOTFOUND", si.Resource, "no publisher for this resource found", client)
s.log(identity, "PLAY", "NOTFOUND", si.Resource, "no publisher for this resource found", client)
return
}
@ -412,16 +420,16 @@ func (s *server) handleSubscribe(conn srt.Conn) {
config := srt.DefaultConfig()
config.StreamId = streamId
config.Latency = 200 * time.Millisecond // This might be a value obtained from the cluster
host, err := config.UnmarshalURL(peerurl)
host, port, err := config.UnmarshalURL(peerurl)
if err != nil {
s.logger.Error().WithField("address", peerurl).WithError(err).Log("Parsing proxy address failed")
s.log("SUBSCRIBE", "NOTFOUND", si.Resource, "no publisher for this resource found", client)
s.log(identity, "PLAY", "NOTFOUND", si.Resource, "no publisher for this resource found", client)
return
}
src, err := srt.Dial("srt", host, config)
src, err := srt.Dial("srt", host+":"+port, config)
if err != nil {
s.logger.Error().WithField("address", peerurl).WithError(err).Log("Proxying address failed")
s.log("SUBSCRIBE", "NOTFOUND", si.Resource, "no publisher for this resource found", client)
s.log(identity, "PLAY", "NOTFOUND", si.Resource, "no publisher for this resource found", client)
return
}
@ -429,13 +437,13 @@ func (s *server) handleSubscribe(conn srt.Conn) {
wg.Add(1)
go func() {
s.log("SUBSCRIBE", "PROXYSTART", peerurl, "", client)
s.log(identity, "PLAY", "PROXYSTART", peerurl, "", client)
wg.Done()
err := s.publish(src, true)
if err != nil {
s.logger.Error().WithField("address", srturl).WithError(err).Log("Proxying address failed")
}
s.log("SUBSCRIBE", "PROXYPUBLISHSTOP", peerurl, "", client)
s.log(identity, "PLAY", "PROXYSTOP", peerurl, "", client)
}()
// Wait for the goroutine to start
@ -463,14 +471,14 @@ func (s *server) handleSubscribe(conn srt.Conn) {
}
if ch != nil {
s.log("SUBSCRIBE", "START", si.Resource, "", client)
s.log(identity, "PLAY", "START", si.Resource, "", client)
id := ch.AddSubscriber(conn, si.Resource)
// Blocks until connection closes
ch.pubsub.Subscribe(conn)
err := ch.pubsub.Subscribe(conn)
s.log("SUBSCRIBE", "STOP", si.Resource, "", client)
s.log(identity, "PLAY", "STOP", si.Resource, err.Error(), client)
ch.RemoveSubscriber(id)
}
@ -486,13 +494,13 @@ func (s *server) findIdentityFromToken(key string) (string, error) {
var token string
elements := strings.Split(key, ":")
if len(elements) == 1 {
before, after, found := strings.Cut(key, ":")
if !found {
identity = s.iam.GetDefaultVerifier()
token = elements[0]
token = before
} else {
identity, err = s.iam.GetVerifier(elements[0])
token = elements[1]
identity, err = s.iam.GetVerifier(before)
token = after
}
if err != nil {
@ -500,7 +508,13 @@ func (s *server) findIdentityFromToken(key string) (string, error) {
}
if ok, err := identity.VerifyServiceToken(token); !ok {
return "$anon", fmt.Errorf("invalid token: %w", err)
if err != nil {
err = fmt.Errorf("invalid token: %w", err)
} else {
err = fmt.Errorf("invalid token")
}
return "$anon", err
}
return identity.Name(), nil

View File

@ -11,8 +11,6 @@ linters:
disable-all: true
enable:
- bodyclose
- deadcode
- depguard
- dupl
- errcheck
- gocritic
@ -25,11 +23,9 @@ linters:
- nakedret
- prealloc
- staticcheck
- structcheck
- typecheck
- unconvert
- unused
- varcheck
issues:
exclude-rules:

View File

@ -5,10 +5,186 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
<a name="unreleased"></a>
## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.30...HEAD)
## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.32...HEAD)
<!-- end of if -->
<!-- end of CommitGroups -->
<a name="v0.17.32"></a>
## [v0.17.32](https://github.com/99designs/gqlgen/compare/v0.17.31...v0.17.32) - 2023-06-06
- <a href="https://github.com/99designs/gqlgen/commit/3a81a78bb7370f067c6bf4f3ce79de0e77f885a1"><tt>3a81a78b</tt></a> release v0.17.32
- <a href="https://github.com/99designs/gqlgen/commit/dbb61174f81ef5e30cb33e772f650abdc41da90a"><tt>dbb61174</tt></a> Added unit tests for defer (<a href="https://github.com/99designs/gqlgen/pull/2657">#2657</a>)
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/5c19c84141c643fa8e3ef59a63d9788ebc966c43"><tt>5c19c841</tt></a> Addressing few issues in defer feature (<a href="https://github.com/99designs/gqlgen/pull/2656">#2656</a>)</summary>
And fixed hasNext to only appear in the payload when there is deferred usage
* Regenerate
* Use go 1.18 compatible atomic operations
* Regenerate
</details></dd></dl>
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/8e295024ada297c219aa2e79754a9e4f601d0b56"><tt>8e295024</tt></a> Update extra fields type definition and plus docs about the feature (<a href="https://github.com/99designs/gqlgen/pull/2655">#2655</a>)</summary>
* Update extra fields type definition and plus docs about the feature
* Update docs
</details></dd></dl>
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/adf5da27cd1bec2ce81b9a3e8e705ed80714f47a"><tt>adf5da27</tt></a> Make usage of omitempty tag optional (<a href="https://github.com/99designs/gqlgen/pull/2649">#2649</a>)</summary>
* Make usage of omitempty tag optional
* adding probably good enough test
* some kinda docs
* lintersssssssssssssssssssssssssssss
* removing unnecessary fields from config
</details></dd></dl>
- <a href="https://github.com/99designs/gqlgen/commit/7ab3317689c9ee85bcebb454a123ba8f70307058"><tt>7ab33176</tt></a> Extra fields (<a href="https://github.com/99designs/gqlgen/pull/2638">#2638</a>)
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/22deb8bd6c7662bb8eacc29c9a08df0b85444721"><tt>22deb8bd</tt></a> allow binding a GraphQL `Any` field to a struct method returning `*any` (<a href="https://github.com/99designs/gqlgen/pull/2644">#2644</a>)</summary>
* allow binding GQL `Any` field to struct method returning `*any`
* add singlefile tests for binding to `*any` case
* add followschema tests for binding to `*any` case
* make ptr_to_any binding tests follow binding conventions better
</details></dd></dl>
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/c313bf3d289985768fdca4e26162d3c429431711"><tt>c313bf3d</tt></a> `[@defer](https://github.com/defer)` initial support (<a href="https://github.com/99designs/gqlgen/pull/2642">#2642</a>)</summary>
* support returning errors with deferred fragments.
* update integration tests.
* fix gotpl indent and pass the correct context to deferred .Dispatch().
* Added hasNext in the tests
* Added back root_.gotpl
* Regenerate
* Regenerate recursively
* Updated schema-expected.graphql
* Fixed starwars_test.go
* Cleanup
* Add graphql response hasnext omitempty and update tests to match
---------
</details></dd></dl>
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/4d945da283950a9cd66fb6df58abe2bb5f0cf836"><tt>4d945da2</tt></a> feat(federation): update Apollo Federation v2 definitions (<a href="https://github.com/99designs/gqlgen/pull/2635">#2635</a>)</summary>
* feat(federation): update Apollo Federation v2 definitions
Fix Apollo Federation v2 directive definitions:
* `_FieldSet` was renamed `FieldSet`
* regenerate examples
</details></dd></dl>
- <a href="https://github.com/99designs/gqlgen/commit/9796f91dba461051d70a9101c9113b99f7ee82df"><tt>9796f91d</tt></a> Generate entity resolvers for interfaces with [@key](https://github.com/key) defined (<a href="https://github.com/99designs/gqlgen/pull/2634">#2634</a>)
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/33fdd1b5c0b4784c3aece793f1e5e315773dfb0d"><tt>33fdd1b5</tt></a> fix enum capitalization (<a href="https://github.com/99designs/gqlgen/pull/2630">#2630</a>)</summary>
* fix enum capitalization
* apply suggestion: adding comment
</details></dd></dl>
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/82a110ce861ebf6a208d98c6b1514c6e845f2b98"><tt>82a110ce</tt></a> Fix uint32 unmarshal (<a href="https://github.com/99designs/gqlgen/pull/2631">#2631</a>)</summary>
The string unmarshal for uint32 used ParseInt instead of ParseUint,
which would parse the wrong range of valid numbers.
</details></dd></dl>
- <a href="https://github.com/99designs/gqlgen/commit/e62a02772ec7cf3c6f526c68e5689a2bad2e8f3b"><tt>e62a0277</tt></a> Add Changelog entries for v0.17.31
- <a href="https://github.com/99designs/gqlgen/commit/f707aa8d88c494f23ed981add5b43944c27a88f2"><tt>f707aa8d</tt></a> v0.17.31 postrelease bump
<!-- end of Commits -->
<!-- end of Else -->
<!-- end of If NoteGroups -->
<a name="v0.17.31"></a>
## [v0.17.31](https://github.com/99designs/gqlgen/compare/v0.17.30...v0.17.31) - 2023-05-05
- <a href="https://github.com/99designs/gqlgen/commit/37b262075d385c49505ba76e09aec520f23f70f0"><tt>37b26207</tt></a> release v0.17.31
- <a href="https://github.com/99designs/gqlgen/commit/4016b2bde056a9c83b21e1e84cde1319bd8461a8"><tt>4016b2bd</tt></a> fix (<a href="https://github.com/99designs/gqlgen/pull/2628">#2628</a>)
- <a href="https://github.com/99designs/gqlgen/commit/5a81c3e37ae8caab333476e5917613f6d580083b"><tt>5a81c3e3</tt></a> Remove other &&
- <a href="https://github.com/99designs/gqlgen/commit/fde269c0b7130e89104c37b2ee5bc6f8704d788f"><tt>fde269c0</tt></a> Remove extraneous run
- <a href="https://github.com/99designs/gqlgen/commit/47a5b33360aec66c59c0af55fe152bfac8170434"><tt>47a5b333</tt></a> Avoid && in command for retry
- <a href="https://github.com/99designs/gqlgen/commit/4d8f850b51dade590a8672a2b938b486339c74fe"><tt>4d8f850b</tt></a> Add timeout minutes
- <a href="https://github.com/99designs/gqlgen/commit/c839b6c1d846b4bfac7f7e80c4ef9f9293d40413"><tt>c839b6c1</tt></a> Bandaid for flaky websocket tests
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/395c362b50f2e97207e8c5c37b8413748dc4b05f"><tt>395c362b</tt></a> New option to make comments on resolver optional (<a href="https://github.com/99designs/gqlgen/pull/2627">#2627</a>)</summary>
* remove 'foo' above resolver
* regenerate after 6a3869707da1ffff7c196fcbcac44c92
* omit resolver template comment
* re-generate
</details></dd></dl>
- <a href="https://github.com/99designs/gqlgen/commit/239b97ee7578dc59d3d35421d8c4fd65f2506193"><tt>239b97ee</tt></a> Omittable input fields (<a href="https://github.com/99designs/gqlgen/pull/2585">#2585</a>)
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/2ad08ffffe7b2b3269b04a71808b15abb4dda7c1"><tt>2ad08fff</tt></a> Bugfix: add missing return statements in GRAPHQL and UrlEncodedForm transports. (<a href="https://github.com/99designs/gqlgen/pull/2625">#2625</a>)</summary>
Two transports (GRAPHQL and UrlEncodedForm) did not have return
statement at the end of `if err` block. Instead of returning
a 'could not cleanup body' error, we continued processing.
User still got an error. But instead of early 'could not cleanup'
error, user gor 'Internal system error' which happened a few
lines after the if block.
Tests are added.
</details></dd></dl>
- <a href="https://github.com/99designs/gqlgen/commit/a13eca12117720d9d6cccba8549c7e6934d09fbd"><tt>a13eca12</tt></a> update autogenerated gqlgen.yml with new options. (<a href="https://github.com/99designs/gqlgen/pull/2622">#2622</a>)
- <a href="https://github.com/99designs/gqlgen/commit/f1f63b52e33acbe10f389bc48d3132cb819f9f41"><tt>f1f63b52</tt></a> Post Release Changelog entry
- <a href="https://github.com/99designs/gqlgen/commit/81f3469f32baddab2ec29f51cd09308409e25577"><tt>81f3469f</tt></a> v0.17.30 postrelease bump
<!-- end of Commits -->
<!-- end of Else -->
<!-- end of If NoteGroups -->
<a name="v0.17.30"></a>
## [v0.17.30](https://github.com/99designs/gqlgen/compare/v0.17.29...v0.17.30) - 2023-04-20
- <a href="https://github.com/99designs/gqlgen/commit/4754e2b3c11870300277831e8f7183bc2d4c213e"><tt>4754e2b3</tt></a> release v0.17.30

View File

@ -103,9 +103,9 @@ nextArg:
return newArgs, nil
}
func (a *Data) Args() map[string][]*FieldArgument {
func (d *Data) Args() map[string][]*FieldArgument {
ret := map[string][]*FieldArgument{}
for _, o := range a.Objects {
for _, o := range d.Objects {
for _, f := range o.Fields {
if len(f.Args) > 0 {
ret[f.ArgsFunc()] = f.Args
@ -113,9 +113,9 @@ func (a *Data) Args() map[string][]*FieldArgument {
}
}
for _, d := range a.Directives() {
if len(d.Args) > 0 {
ret[d.ArgsFunc()] = d.Args
for _, directive := range d.Directives() {
if len(directive.Args) > 0 {
ret[directive.ArgsFunc()] = directive.Args
}
}
return ret

View File

@ -221,91 +221,99 @@ func (ref *TypeReference) Elem() *TypeReference {
return nil
}
func (t *TypeReference) IsPtr() bool {
_, isPtr := t.GO.(*types.Pointer)
func (ref *TypeReference) IsPtr() bool {
_, isPtr := ref.GO.(*types.Pointer)
return isPtr
}
// fix for https://github.com/golang/go/issues/31103 may make it possible to remove this (may still be useful)
func (t *TypeReference) IsPtrToPtr() bool {
if p, isPtr := t.GO.(*types.Pointer); isPtr {
func (ref *TypeReference) IsPtrToPtr() bool {
if p, isPtr := ref.GO.(*types.Pointer); isPtr {
_, isPtr := p.Elem().(*types.Pointer)
return isPtr
}
return false
}
func (t *TypeReference) IsNilable() bool {
return IsNilable(t.GO)
func (ref *TypeReference) IsNilable() bool {
return IsNilable(ref.GO)
}
func (t *TypeReference) IsSlice() bool {
_, isSlice := t.GO.(*types.Slice)
return t.GQL.Elem != nil && isSlice
func (ref *TypeReference) IsSlice() bool {
_, isSlice := ref.GO.(*types.Slice)
return ref.GQL.Elem != nil && isSlice
}
func (t *TypeReference) IsPtrToSlice() bool {
if t.IsPtr() {
_, isPointerToSlice := t.GO.(*types.Pointer).Elem().(*types.Slice)
func (ref *TypeReference) IsPtrToSlice() bool {
if ref.IsPtr() {
_, isPointerToSlice := ref.GO.(*types.Pointer).Elem().(*types.Slice)
return isPointerToSlice
}
return false
}
func (t *TypeReference) IsNamed() bool {
_, isSlice := t.GO.(*types.Named)
func (ref *TypeReference) IsPtrToIntf() bool {
if ref.IsPtr() {
_, isPointerToInterface := ref.GO.(*types.Pointer).Elem().(*types.Interface)
return isPointerToInterface
}
return false
}
func (ref *TypeReference) IsNamed() bool {
_, isSlice := ref.GO.(*types.Named)
return isSlice
}
func (t *TypeReference) IsStruct() bool {
_, isStruct := t.GO.Underlying().(*types.Struct)
func (ref *TypeReference) IsStruct() bool {
_, isStruct := ref.GO.Underlying().(*types.Struct)
return isStruct
}
func (t *TypeReference) IsScalar() bool {
return t.Definition.Kind == ast.Scalar
func (ref *TypeReference) IsScalar() bool {
return ref.Definition.Kind == ast.Scalar
}
func (t *TypeReference) UniquenessKey() string {
func (ref *TypeReference) UniquenessKey() string {
nullability := "O"
if t.GQL.NonNull {
if ref.GQL.NonNull {
nullability = "N"
}
elemNullability := ""
if t.GQL.Elem != nil && t.GQL.Elem.NonNull {
if ref.GQL.Elem != nil && ref.GQL.Elem.NonNull {
// Fix for #896
elemNullability = "ᚄ"
}
return nullability + t.Definition.Name + "2" + templates.TypeIdentifier(t.GO) + elemNullability
return nullability + ref.Definition.Name + "2" + templates.TypeIdentifier(ref.GO) + elemNullability
}
func (t *TypeReference) MarshalFunc() string {
if t.Definition == nil {
panic(errors.New("Definition missing for " + t.GQL.Name()))
func (ref *TypeReference) MarshalFunc() string {
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if t.Definition.Kind == ast.InputObject {
if ref.Definition.Kind == ast.InputObject {
return ""
}
return "marshal" + t.UniquenessKey()
return "marshal" + ref.UniquenessKey()
}
func (t *TypeReference) UnmarshalFunc() string {
if t.Definition == nil {
panic(errors.New("Definition missing for " + t.GQL.Name()))
func (ref *TypeReference) UnmarshalFunc() string {
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if !t.Definition.IsInputType() {
if !ref.Definition.IsInputType() {
return ""
}
return "unmarshal" + t.UniquenessKey()
return "unmarshal" + ref.UniquenessKey()
}
func (t *TypeReference) IsTargetNilable() bool {
return IsNilable(t.Target)
func (ref *TypeReference) IsTargetNilable() bool {
return IsNilable(ref.Target)
}
func (b *Binder) PushRef(ret *TypeReference) {

View File

@ -35,6 +35,7 @@ type Config struct {
ReturnPointersInUmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"`
ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"`
NullableInputOmittable bool `yaml:"nullable_input_omittable,omitempty"`
EnableModelJsonOmitemptyTag *bool `yaml:"enable_model_json_omitempty_tag,omitempty"`
SkipValidation bool `yaml:"skip_validation,omitempty"`
SkipModTidy bool `yaml:"skip_mod_tidy,omitempty"`
Sources []*ast.Source `yaml:"-"`
@ -324,6 +325,9 @@ func (c *Config) injectTypesFromSchema() error {
type TypeMapEntry struct {
Model StringList `yaml:"model"`
Fields map[string]TypeMapField `yaml:"fields,omitempty"`
// Key is the Go name of the field.
ExtraFields map[string]ModelExtraField `yaml:"extraFields,omitempty"`
}
type TypeMapField struct {
@ -332,6 +336,32 @@ type TypeMapField struct {
GeneratedMethod string `yaml:"-"`
}
type ModelExtraField struct {
// Type is the Go type of the field.
//
// It supports the builtin basic types (like string or int64), named types
// (qualified by the full package path), pointers to those types (prefixed
// with `*`), and slices of those types (prefixed with `[]`).
//
// For example, the following are valid types:
// string
// *github.com/author/package.Type
// []string
// []*github.com/author/package.Type
//
// Note that the type will be referenced from the generated/graphql, which
// means the package it lives in must not reference the generated/graphql
// package to avoid circular imports.
// restrictions.
Type string `yaml:"type"`
// OverrideTags is an optional override of the Go field tag.
OverrideTags string `yaml:"overrideTags"`
// Description is an optional the Go field doc-comment.
Description string `yaml:"description"`
}
type StringList []string
func (a *StringList) UnmarshalYAML(unmarshal func(interface{}) error) error {

View File

@ -94,7 +94,7 @@ func (ec *executionContext) {{ $field.FieldContextFunc }}(ctx context.Context, f
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.{{ $field.ArgsFunc }}(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
return
return fc, err
}
{{- end }}
return fc, nil

View File

@ -14,7 +14,6 @@
{{ reserveImport "github.com/99designs/gqlgen/graphql" }}
{{ reserveImport "github.com/99designs/gqlgen/graphql/introspection" }}
{{ if eq .Config.Exec.Layout "single-file" }}
// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
func NewExecutableSchema(cfg Config) graphql.ExecutableSchema {
@ -104,7 +103,7 @@
}
func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) {
ec := executionContext{nil, e}
ec := executionContext{nil, e, 0, 0, nil}
_ = ec
{{ if not .Config.OmitComplexity -}}
switch typeName + "." + field {
@ -139,7 +138,7 @@
func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
rc := graphql.GetOperationContext(ctx)
ec := executionContext{rc, e}
ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)}
inputUnmarshalMap := graphql.BuildUnmarshalerMap(
{{- range $input := .Inputs -}}
{{ if not $input.HasUnmarshal }}
@ -152,22 +151,39 @@
switch rc.Operation.Operation {
{{- if .QueryRoot }} case ast.Query:
return func(ctx context.Context) *graphql.Response {
if !first { return nil }
first = false
ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap)
{{ if .Directives.LocationDirectives "QUERY" -}}
data := ec._queryMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){
return ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet), nil
})
{{- else -}}
data := ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet)
{{- end }}
var response graphql.Response
var data graphql.Marshaler
if first {
first = false
ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap)
{{ if .Directives.LocationDirectives "QUERY" -}}
data = ec._queryMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){
return ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet), nil
})
{{- else -}}
data = ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet)
{{- end }}
} else {
if atomic.LoadInt32(&ec.pendingDeferred) > 0 {
result := <-ec.deferredResults
atomic.AddInt32(&ec.pendingDeferred, -1)
data = result.Result
response.Path = result.Path
response.Label = result.Label
response.Errors = result.Errors
} else {
return nil
}
}
var buf bytes.Buffer
data.MarshalGQL(&buf)
return &graphql.Response{
Data: buf.Bytes(),
response.Data = buf.Bytes()
if atomic.LoadInt32(&ec.deferred) > 0 {
hasNext := atomic.LoadInt32(&ec.pendingDeferred) > 0
response.HasNext = &hasNext
}
return &response
}
{{ end }}
@ -224,6 +240,28 @@
type executionContext struct {
*graphql.OperationContext
*executableSchema
deferred int32
pendingDeferred int32
deferredResults chan graphql.DeferredResult
}
func (ec *executionContext) processDeferredGroup(dg graphql.DeferredGroup) {
atomic.AddInt32(&ec.pendingDeferred, 1)
go func () {
ctx := graphql.WithFreshResponseContext(dg.Context)
dg.FieldSet.Dispatch(ctx)
ds := graphql.DeferredResult{
Path: dg.Path,
Label: dg.Label,
Result: dg.FieldSet,
Errors: graphql.GetErrors(ctx),
}
// null fields should bubble up
if dg.FieldSet.Invalids > 0 {
ds.Result = graphql.Null
}
ec.deferredResults <- ds
}()
}
func (ec *executionContext) introspectSchema() (*introspection.Schema, error) {

View File

@ -25,86 +25,121 @@ func (ec *executionContext) _{{$object.Name}}(ctx context.Context, sel ast.Selec
{{- else }}
func (ec *executionContext) _{{$object.Name}}(ctx context.Context, sel ast.SelectionSet{{ if not $object.Root }},obj {{$object.Reference | ref }}{{ end }}) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, {{$object.Name|lcFirst}}Implementors)
{{- if $object.Root }}
ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
Object: {{$object.Name|quote}},
})
{{end}}
{{- if $object.Root }}
ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
Object: {{$object.Name|quote}},
})
{{end}}
out := graphql.NewFieldSet(fields)
var invalids uint32
deferred := make(map[string]*graphql.FieldSet)
for i, field := range fields {
{{- if $object.Root }}
innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{
Object: field.Name,
Field: field,
})
{{end}}
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString({{$object.Name|quote}})
{{- range $field := $object.Fields }}
case "{{$field.Name}}":
{{- if $field.IsConcurrent }}
field := field
{{- if $object.Root }}
innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{
Object: field.Name,
Field: field,
})
{{end}}
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString({{$object.Name|quote}})
{{- range $field := $object.Fields }}
case "{{$field.Name}}":
{{- if $field.IsConcurrent }}
field := field
innerFunc := func(ctx context.Context) (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._{{$object.Name}}_{{$field.Name}}(ctx, field{{if not $object.Root}}, obj{{end}})
{{- if $field.TypeReference.GQL.NonNull }}
if res == graphql.Null {
{{- if $object.IsConcurrent }}
atomic.AddUint32(&invalids, 1)
{{- else }}
invalids++
{{- end }}
}
{{- end }}
return res
}
innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._{{$object.Name}}_{{$field.Name}}(ctx, field{{if not $object.Root}}, obj{{end}})
{{- if $field.TypeReference.GQL.NonNull }}
if res == graphql.Null {
{{- if $object.IsConcurrent }}
atomic.AddUint32(&fs.Invalids, 1)
{{- else }}
fs.Invalids++
{{- end }}
}
{{- end }}
return res
}
{{if $object.Root}}
rrm := func(ctx context.Context) graphql.Marshaler {
return ec.OperationContext.RootResolverMiddleware(ctx, innerFunc)
}
{{end}}
{{if $object.Root}}
rrm := func(ctx context.Context) graphql.Marshaler {
return ec.OperationContext.RootResolverMiddleware(ctx,
func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
}
{{end}}
out.Concurrently(i, func() graphql.Marshaler {
{{- if $object.Root -}}
return rrm(innerCtx)
{{- else -}}
return innerFunc(ctx)
{{end}}
})
{{- else }}
{{if $object.Root}}
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
return ec._{{$object.Name}}_{{$field.Name}}(ctx, field)
})
{{else}}
out.Values[i] = ec._{{$object.Name}}_{{$field.Name}}(ctx, field, obj)
{{end}}
{{if not $object.Root}}
if field.Deferrable != nil {
dfs, ok := deferred[field.Deferrable.Label]
di := 0
if ok {
dfs.AddField(field)
di = len(dfs.Values) - 1
} else {
dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
deferred[field.Deferrable.Label] = dfs
}
dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
return innerFunc(ctx, dfs)
})
{{- if $field.TypeReference.GQL.NonNull }}
if out.Values[i] == graphql.Null {
{{- if $object.IsConcurrent }}
atomic.AddUint32(&invalids, 1)
{{- else }}
invalids++
{{- end }}
}
{{- end }}
{{- end }}
{{- end }}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
// don't run the out.Concurrently() call below
out.Values[i] = graphql.Null
continue
}
{{end}}
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler {
{{- if $object.Root -}}
return rrm(innerCtx)
{{- else -}}
return innerFunc(ctx, out)
{{- end -}}
})
{{- else }}
{{- if $object.Root -}}
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
return ec._{{$object.Name}}_{{$field.Name}}(ctx, field)
})
{{- else -}}
out.Values[i] = ec._{{$object.Name}}_{{$field.Name}}(ctx, field, obj)
{{- end -}}
{{- if $field.TypeReference.GQL.NonNull }}
if out.Values[i] == graphql.Null {
{{- if $object.IsConcurrent }}
atomic.AddUint32(&out.Invalids, 1)
{{- else }}
out.Invalids++
{{- end }}
}
{{- end }}
{{- end }}
{{- end }}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 { return graphql.Null }
out.Dispatch(ctx)
if out.Invalids > 0 { return graphql.Null }
atomic.AddInt32(&ec.deferred, int32(len(deferred)))
for label, dfs := range deferred {
ec.processDeferredGroup(graphql.DeferredGroup{
Label: label,
Path: graphql.GetPath(ctx),
FieldSet: dfs,
Context: ctx,
})
}
return out
}
{{- end }}

View File

@ -76,7 +76,7 @@ func (e *executableSchema) Schema() *ast.Schema {
}
func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) {
ec := executionContext{nil, e}
ec := executionContext{nil, e, 0, 0, nil}
_ = ec
{{- if not .Config.OmitComplexity }}
switch typeName + "." + field {
@ -111,7 +111,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
rc := graphql.GetOperationContext(ctx)
ec := executionContext{rc, e}
ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)}
inputUnmarshalMap := graphql.BuildUnmarshalerMap(
{{- range $input := .Inputs -}}
{{ if not $input.HasUnmarshal }}
@ -124,22 +124,39 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
switch rc.Operation.Operation {
{{- if .QueryRoot }} case ast.Query:
return func(ctx context.Context) *graphql.Response {
if !first { return nil }
first = false
ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap)
{{ if .Directives.LocationDirectives "QUERY" -}}
data := ec._queryMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){
return ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet), nil
})
{{- else -}}
data := ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet)
{{- end }}
var response graphql.Response
var data graphql.Marshaler
if first {
first = false
ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap)
{{ if .Directives.LocationDirectives "QUERY" -}}
data = ec._queryMiddleware(ctx, rc.Operation, func(ctx context.Context) (interface{}, error){
return ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet), nil
})
{{- else -}}
data = ec._{{.QueryRoot.Name}}(ctx, rc.Operation.SelectionSet)
{{- end }}
} else {
if atomic.LoadInt32(&ec.pendingDeferred) > 0 {
result := <-ec.deferredResults
atomic.AddInt32(&ec.pendingDeferred, -1)
data = result.Result
response.Path = result.Path
response.Label = result.Label
response.Errors = result.Errors
} else {
return nil
}
}
var buf bytes.Buffer
data.MarshalGQL(&buf)
return &graphql.Response{
Data: buf.Bytes(),
response.Data = buf.Bytes()
if atomic.LoadInt32(&ec.deferred) > 0 {
hasNext := atomic.LoadInt32(&ec.pendingDeferred) > 0
response.HasNext = &hasNext
}
return &response
}
{{ end }}
@ -196,6 +213,28 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
type executionContext struct {
*graphql.OperationContext
*executableSchema
deferred int32
pendingDeferred int32
deferredResults chan graphql.DeferredResult
}
func (ec *executionContext) processDeferredGroup(dg graphql.DeferredGroup) {
atomic.AddInt32(&ec.pendingDeferred, 1)
go func () {
ctx := graphql.WithFreshResponseContext(dg.Context)
dg.FieldSet.Dispatch(ctx)
ds := graphql.DeferredResult{
Path: dg.Path,
Label: dg.Label,
Result: dg.FieldSet,
Errors: graphql.GetErrors(ctx),
}
// null fields should bubble up
if dg.FieldSet.Invalids > 0 {
ds.Result = graphql.Null
}
ec.deferredResults <- ds
}()
}
func (ec *executionContext) introspectSchema() (*introspection.Schema, error) {

View File

@ -517,7 +517,25 @@ func wordWalker(str string, f func(*wordInfo)) {
}
matchCommonInitial := false
if commonInitialisms[strings.ToUpper(word)] {
upperWord := strings.ToUpper(word)
if commonInitialisms[upperWord] {
// If the uppercase word (string(runes[w:i]) is "ID" or "IP"
// AND
// the word is the first two characters of the str
// AND
// that is not the end of the word
// AND
// the length of the string is greater than 3
// AND
// the third rune is an uppercase one
// THEN
// do NOT count this as an initialism.
switch upperWord {
case "ID", "IP":
if word == str[:2] && !eow && len(str) > 3 && unicode.IsUpper(runes[3]) {
continue
}
}
hasCommonInitial = true
matchCommonInitial = true
}

View File

@ -26,7 +26,7 @@ func processType(ret map[string]*config.TypeReference, ref *config.TypeReference
}
ret[key] = ref
if ref.IsSlice() || ref.IsPtrToSlice() || ref.IsPtrToPtr() {
if ref.IsSlice() || ref.IsPtrToSlice() || ref.IsPtrToPtr() || ref.IsPtrToIntf() {
processType(ret, ref.Elem())
}
}

View File

@ -4,7 +4,7 @@
{{- if and $type.IsNilable (not $type.GQL.NonNull) (not $type.IsPtrToPtr) }}
if v == nil { return nil, nil }
{{- end }}
{{- if $type.IsPtrToSlice }}
{{- if or $type.IsPtrToSlice $type.IsPtrToIntf }}
res, err := ec.{{ $type.Elem.UnmarshalFunc }}(ctx, v)
return &res, graphql.ErrorOnPath(ctx, err)
{{- else if $type.IsSlice }}
@ -89,7 +89,7 @@
{{ with $type.MarshalFunc }}
func (ec *executionContext) {{ . }}(ctx context.Context, sel ast.SelectionSet, v {{ $type.GO | ref }}) graphql.Marshaler {
{{- if $type.IsPtrToSlice }}
{{- if or $type.IsPtrToSlice $type.IsPtrToIntf }}
return ec.{{ $type.Elem.MarshalFunc }}(ctx, sel, *v)
{{- else if $type.IsSlice }}
{{- if not $type.GQL.NonNull }}

View File

@ -15,15 +15,15 @@ type Cache interface {
type MapCache map[string]interface{}
// Get looks up a key's value from the cache.
func (m MapCache) Get(ctx context.Context, key string) (value interface{}, ok bool) {
func (m MapCache) Get(_ context.Context, key string) (value interface{}, ok bool) {
v, ok := m[key]
return v, ok
}
// Add adds a value to the cache.
func (m MapCache) Add(ctx context.Context, key string, value interface{}) { m[key] = value }
func (m MapCache) Add(_ context.Context, key string, value interface{}) { m[key] = value }
type NoCache struct{}
func (n NoCache) Get(ctx context.Context, key string) (value interface{}, ok bool) { return nil, false }
func (n NoCache) Add(ctx context.Context, key string, value interface{}) {}
func (n NoCache) Get(_ context.Context, _ string) (value interface{}, ok bool) { return nil, false }
func (n NoCache) Add(_ context.Context, _ string, _ interface{}) {}

View File

@ -36,6 +36,14 @@ func WithResponseContext(ctx context.Context, presenterFunc ErrorPresenterFunc,
})
}
func WithFreshResponseContext(ctx context.Context) context.Context {
e := getResponseContext(ctx)
return context.WithValue(ctx, resultCtx, &responseContext{
errorPresenter: e.errorPresenter,
recover: e.recover,
})
}
// AddErrorf writes a formatted error to the client, first passing it through the error presenter.
func AddErrorf(ctx context.Context, format string, args ...interface{}) {
AddError(ctx, fmt.Errorf(format, args...))

26
vendor/github.com/99designs/gqlgen/graphql/deferred.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package graphql
import (
"context"
"github.com/vektah/gqlparser/v2/ast"
"github.com/vektah/gqlparser/v2/gqlerror"
)
type Deferrable struct {
Label string
}
type DeferredGroup struct {
Path ast.Path
Label string
FieldSet *FieldSet
Context context.Context
}
type DeferredResult struct {
Path ast.Path
Label string
Result Marshaler
Errors gqlerror.List
}

View File

@ -45,9 +45,19 @@ func collectFields(reqCtx *OperationContext, selSet ast.SelectionSet, satisfies
if len(satisfies) > 0 && !instanceOf(sel.TypeCondition, satisfies) {
continue
}
shouldDefer, label := deferrable(sel.Directives, reqCtx.Variables)
for _, childField := range collectFields(reqCtx, sel.SelectionSet, satisfies, visited) {
f := getOrCreateAndAppendField(&groupedFields, childField.Name, childField.Alias, childField.ObjectDefinition, func() CollectedField { return childField })
f := getOrCreateAndAppendField(
&groupedFields, childField.Name, childField.Alias, childField.ObjectDefinition,
func() CollectedField { return childField })
f.Selections = append(f.Selections, childField.Selections...)
if shouldDefer {
f.Deferrable = &Deferrable{
Label: label,
}
}
}
case *ast.FragmentSpread:
@ -70,9 +80,16 @@ func collectFields(reqCtx *OperationContext, selSet ast.SelectionSet, satisfies
continue
}
shouldDefer, label := deferrable(sel.Directives, reqCtx.Variables)
for _, childField := range collectFields(reqCtx, fragment.SelectionSet, satisfies, visited) {
f := getOrCreateAndAppendField(&groupedFields, childField.Name, childField.Alias, childField.ObjectDefinition, func() CollectedField { return childField })
f := getOrCreateAndAppendField(&groupedFields,
childField.Name, childField.Alias, childField.ObjectDefinition,
func() CollectedField { return childField })
f.Selections = append(f.Selections, childField.Selections...)
if shouldDefer {
f.Deferrable = &Deferrable{Label: label}
}
}
default:
@ -87,6 +104,7 @@ type CollectedField struct {
*ast.Field
Selections ast.SelectionSet
Deferrable *Deferrable
}
func instanceOf(val string, satisfies []string) bool {
@ -150,6 +168,32 @@ func shouldIncludeNode(directives ast.DirectiveList, variables map[string]interf
return !skip && include
}
func deferrable(directives ast.DirectiveList, variables map[string]interface{}) (shouldDefer bool, label string) {
d := directives.ForName("defer")
if d == nil {
return false, ""
}
shouldDefer = true
for _, arg := range d.Arguments {
switch arg.Name {
case "if":
if value, err := arg.Value.Value(variables); err == nil {
shouldDefer, _ = value.(bool)
}
case "label":
if value, err := arg.Value.Value(variables); err == nil {
label, _ = value.(string)
}
default:
panic(fmt.Sprintf("defer: argument '%s' not supported", arg.Name))
}
}
return shouldDefer, label
}
func resolveIfArgument(d *ast.Directive, variables map[string]interface{}) bool {
arg := d.Arguments.ForName("if")
if arg == nil {

View File

@ -1,19 +1,21 @@
package graphql
import (
"context"
"io"
"sync"
)
type FieldSet struct {
fields []CollectedField
Values []Marshaler
delayed []delayedResult
fields []CollectedField
Values []Marshaler
Invalids uint32
delayed []delayedResult
}
type delayedResult struct {
i int
f func() Marshaler
f func(context.Context) Marshaler
}
func NewFieldSet(fields []CollectedField) *FieldSet {
@ -23,15 +25,20 @@ func NewFieldSet(fields []CollectedField) *FieldSet {
}
}
func (m *FieldSet) Concurrently(i int, f func() Marshaler) {
func (m *FieldSet) AddField(field CollectedField) {
m.fields = append(m.fields, field)
m.Values = append(m.Values, nil)
}
func (m *FieldSet) Concurrently(i int, f func(context.Context) Marshaler) {
m.delayed = append(m.delayed, delayedResult{i: i, f: f})
}
func (m *FieldSet) Dispatch() {
func (m *FieldSet) Dispatch(ctx context.Context) {
if len(m.delayed) == 1 {
// only one concurrent task, no need to spawn a goroutine or deal create waitgroups
d := m.delayed[0]
m.Values[d.i] = d.f()
m.Values[d.i] = d.f(ctx)
} else if len(m.delayed) > 1 {
// more than one concurrent task, use the main goroutine to do one, only spawn goroutines for the others
@ -39,12 +46,12 @@ func (m *FieldSet) Dispatch() {
for _, d := range m.delayed[1:] {
wg.Add(1)
go func(d delayedResult) {
m.Values[d.i] = d.f()
m.Values[d.i] = d.f(ctx)
wg.Done()
}(d)
}
m.Values[m.delayed[0].i] = m.delayed[0].f()
m.Values[m.delayed[0].i] = m.delayed[0].f(ctx)
wg.Wait()
}
}

View File

@ -59,17 +59,17 @@ func (c *ComplexityLimit) Validate(schema graphql.ExecutableSchema) error {
func (c ComplexityLimit) MutateOperationContext(ctx context.Context, rc *graphql.OperationContext) *gqlerror.Error {
op := rc.Doc.Operations.ForName(rc.OperationName)
complexity := complexity.Calculate(c.es, op, rc.Variables)
complexityCalcs := complexity.Calculate(c.es, op, rc.Variables)
limit := c.Func(ctx, rc)
rc.Stats.SetExtension(complexityExtension, &ComplexityStats{
Complexity: complexity,
Complexity: complexityCalcs,
ComplexityLimit: limit,
})
if complexity > limit {
err := gqlerror.Errorf("operation has complexity %d, which exceeds the limit of %d", complexity, limit)
if complexityCalcs > limit {
err := gqlerror.Errorf("operation has complexity %d, which exceeds the limit of %d", complexityCalcs, limit)
errcode.Set(err, errComplexityLimit)
return err
}

View File

@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"github.com/vektah/gqlparser/v2/ast"
"github.com/vektah/gqlparser/v2/gqlerror"
)
@ -14,6 +15,9 @@ import (
type Response struct {
Errors gqlerror.List `json:"errors,omitempty"`
Data json.RawMessage `json:"data"`
Label string `json:"label,omitempty"`
Path ast.Path `json:"path,omitempty"`
HasNext *bool `json:"hasNext,omitempty"`
Extensions map[string]interface{} `json:"extensions,omitempty"`
}

View File

@ -60,7 +60,7 @@ func MarshalUint32(i uint32) Marshaler {
func UnmarshalUint32(v interface{}) (uint32, error) {
switch v := v.(type) {
case string:
iv, err := strconv.ParseInt(v, 10, 32)
iv, err := strconv.ParseUint(v, 10, 32)
if err != nil {
return 0, err
}

View File

@ -1,3 +1,3 @@
package graphql
const Version = "v0.17.31"
const Version = "v0.17.33"

View File

@ -6,7 +6,7 @@ import (
)
// CompatibleTypes isnt a strict comparison, it allows for pointer differences
func CompatibleTypes(expected types.Type, actual types.Type) error {
func CompatibleTypes(expected, actual types.Type) error {
// Special case to deal with pointer mismatches
{
expectedPtr, expectedIsPtr := expected.(*types.Pointer)
@ -84,11 +84,8 @@ func CompatibleTypes(expected types.Type, actual types.Type) error {
if err := CompatibleTypes(expected.Params(), actual.Params()); err != nil {
return err
}
if err := CompatibleTypes(expected.Results(), actual.Results()); err != nil {
return err
}
return nil
err := CompatibleTypes(expected.Results(), actual.Results())
return err
}
case *types.Interface:
if actual, ok := actual.(*types.Interface); ok {
@ -114,11 +111,8 @@ func CompatibleTypes(expected types.Type, actual types.Type) error {
return err
}
if err := CompatibleTypes(expected.Elem(), actual.Elem()); err != nil {
return err
}
return nil
err := CompatibleTypes(expected.Elem(), actual.Elem())
return err
}
case *types.Chan:

View File

@ -138,7 +138,7 @@ func extractModuleName(content []byte) string {
break
}
s := strings.Trim(string(tkn), " \t")
if len(s) != 0 && !strings.HasPrefix(s, "//") {
if s != "" && !strings.HasPrefix(s, "//") {
break
}
if advance <= len(content) {
@ -171,4 +171,4 @@ func ImportPathForDir(dir string) (res string) {
return ""
}
var modregex = regexp.MustCompile(`module ([^\s]*)`)
var modregex = regexp.MustCompile(`module (\S*)`)

View File

@ -14,8 +14,10 @@ import (
"golang.org/x/tools/go/packages"
)
var once = sync.Once{}
var modInfo *debug.BuildInfo
var (
once = sync.Once{}
modInfo *debug.BuildInfo
)
var mode = packages.NeedName |
packages.NeedFiles |

View File

@ -54,7 +54,7 @@ func QualifyPackagePath(importPath string) string {
return pkg.ImportPath
}
var invalidPackageNameChar = regexp.MustCompile(`[^\w]`)
var invalidPackageNameChar = regexp.MustCompile(`\W`)
func SanitizePackageName(pkg string) string {
return invalidPackageNameChar.ReplaceAllLiteralString(filepath.Base(pkg), "_")

View File

@ -68,7 +68,7 @@ func (r *Rewriter) getFile(filename string) string {
return r.files[filename]
}
func (r *Rewriter) GetPrevDecl(structname string, methodname string) *ast.FuncDecl {
func (r *Rewriter) GetPrevDecl(structname, methodname string) *ast.FuncDecl {
for _, f := range r.pkg.Syntax {
for _, d := range f.Decls {
d, isFunc := d.(*ast.FuncDecl)
@ -99,7 +99,7 @@ func (r *Rewriter) GetPrevDecl(structname string, methodname string) *ast.FuncDe
return nil
}
func (r *Rewriter) GetMethodComment(structname string, methodname string) string {
func (r *Rewriter) GetMethodComment(structname, methodname string) string {
d := r.GetPrevDecl(structname, methodname)
if d != nil {
return d.Doc.Text()
@ -107,7 +107,7 @@ func (r *Rewriter) GetMethodComment(structname string, methodname string) string
return ""
}
func (r *Rewriter) GetMethodBody(structname string, methodname string) string {
func (r *Rewriter) GetMethodBody(structname, methodname string) string {
d := r.GetPrevDecl(structname, methodname)
if d != nil {
return r.getSource(d.Body.Pos()+1, d.Body.End()-1)

File diff suppressed because it is too large Load Diff

View File

@ -86,28 +86,55 @@ func (f *federation) MutateConfig(cfg *config.Config) error {
}
func (f *federation) InjectSourceEarly() *ast.Source {
input := `
scalar _Any
scalar _FieldSet
directive @requires(fields: _FieldSet!) on FIELD_DEFINITION
directive @provides(fields: _FieldSet!) on FIELD_DEFINITION
directive @extends on OBJECT | INTERFACE
`
input := ``
// add version-specific changes on key directive, as well as adding the new directives for federation 2
if f.Version == 1 {
input += `
directive @key(fields: _FieldSet!) repeatable on OBJECT | INTERFACE
directive @requires(fields: _FieldSet!) on FIELD_DEFINITION
directive @provides(fields: _FieldSet!) on FIELD_DEFINITION
directive @extends on OBJECT | INTERFACE
directive @external on FIELD_DEFINITION
scalar _Any
scalar _FieldSet
`
} else if f.Version == 2 {
input += `
directive @key(fields: _FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE
directive @external on FIELD_DEFINITION | OBJECT
directive @composeDirective(name: String!) repeatable on SCHEMA
directive @extends on OBJECT | INTERFACE
directive @external on OBJECT | FIELD_DEFINITION
directive @key(fields: FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE
directive @inaccessible on
| ARGUMENT_DEFINITION
| ENUM
| ENUM_VALUE
| FIELD_DEFINITION
| INPUT_FIELD_DEFINITION
| INPUT_OBJECT
| INTERFACE
| OBJECT
| SCALAR
| UNION
directive @interfaceObject on OBJECT
directive @link(import: [String!], url: String!) repeatable on SCHEMA
directive @shareable on OBJECT | FIELD_DEFINITION
directive @tag(name: String!) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION
directive @override(from: String!) on FIELD_DEFINITION
directive @inaccessible on SCALAR | OBJECT | FIELD_DEFINITION | ARGUMENT_DEFINITION | INTERFACE | UNION | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION
directive @provides(fields: FieldSet!) on FIELD_DEFINITION
directive @requires(fields: FieldSet!) on FIELD_DEFINITION
directive @shareable repeatable on FIELD_DEFINITION | OBJECT
directive @tag(name: String!) repeatable on
| ARGUMENT_DEFINITION
| ENUM
| ENUM_VALUE
| FIELD_DEFINITION
| INPUT_FIELD_DEFINITION
| INPUT_OBJECT
| INTERFACE
| OBJECT
| SCALAR
| UNION
scalar _Any
scalar FieldSet
`
}
return &ast.Source{
@ -123,11 +150,18 @@ func (f *federation) InjectSourceLate(schema *ast.Schema) *ast.Source {
f.setEntities(schema)
var entities, resolvers, entityResolverInputDefinitions string
for i, e := range f.Entities {
if i != 0 {
entities += " | "
for _, e := range f.Entities {
if e.Def.Kind != ast.Interface {
if entities != "" {
entities += " | "
}
entities += e.Name
} else if len(schema.GetPossibleTypes(e.Def)) == 0 {
fmt.Println(
"skipping @key field on interface " + e.Def.Name + " as no types implement it",
)
}
entities += e.Name
for _, r := range e.Resolvers {
if e.Multi {
@ -206,6 +240,16 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
for _, e := range f.Entities {
obj := data.Objects.ByName(e.Def.Name)
if e.Def.Kind == ast.Interface {
if len(data.Interfaces[e.Def.Name].Implementors) == 0 {
fmt.Println(
"skipping @key field on interface " + e.Def.Name + " as no types implement it",
)
continue
}
obj = data.Objects.ByName(data.Interfaces[e.Def.Name].Implementors[0].Name)
}
for _, r := range e.Resolvers {
// fill in types for key fields
//
@ -267,6 +311,12 @@ func (f *federation) setEntities(schema *ast.Schema) {
if !ok {
continue
}
if (schemaType.Kind == ast.Interface) && (len(schema.GetPossibleTypes(schemaType)) == 0) {
fmt.Printf("@key directive found on unused \"interface %s\". Will be ignored.\n", schemaType.Name)
continue
}
e := &Entity{
Name: schemaType.Name,
Def: schemaType,
@ -385,10 +435,12 @@ func isFederatedEntity(schemaType *ast.Definition) ([]*ast.Directive, bool) {
return keys, true
}
case ast.Interface:
// TODO: support @key and @extends for interfaces
if dir := schemaType.Directives.ForName("key"); dir != nil {
fmt.Printf("@key directive found on \"interface %s\". Will be ignored.\n", schemaType.Name)
keys := schemaType.Directives.ForNames("key")
if len(keys) > 0 {
return keys, true
}
// TODO: support @extends for interfaces
if dir := schemaType.Directives.ForName("extends"); dir != nil {
panic(
fmt.Sprintf(

View File

@ -374,7 +374,7 @@ func (m *Plugin) generateFields(cfg *config.Config, schemaType *ast.Definition)
GoName: name,
Type: typ,
Description: field.Description,
Tag: getStructTagFromField(field),
Tag: getStructTagFromField(cfg, field),
Omittable: cfg.NullableInputOmittable && schemaType.Kind == ast.InputObject && !field.Type.NonNull,
}
@ -409,11 +409,40 @@ func (m *Plugin) generateFields(cfg *config.Config, schemaType *ast.Definition)
fields = append(fields, f)
}
// appending extra fields at the end of the fields list.
modelcfg := cfg.Models[schemaType.Name]
if len(modelcfg.ExtraFields) > 0 {
ff := make([]*Field, 0, len(modelcfg.ExtraFields))
for fname, fspec := range modelcfg.ExtraFields {
ftype := buildType(fspec.Type)
tag := `json:"-"`
if fspec.OverrideTags != "" {
tag = fspec.OverrideTags
}
ff = append(ff,
&Field{
Name: fname,
GoName: fname,
Type: ftype,
Description: fspec.Description,
Tag: tag,
})
}
sort.Slice(ff, func(i, j int) bool {
return ff[i].Name < ff[j].Name
})
fields = append(fields, ff...)
}
return fields, nil
}
func getStructTagFromField(field *ast.FieldDefinition) string {
if !field.Type.NonNull {
func getStructTagFromField(cfg *config.Config, field *ast.FieldDefinition) string {
if !field.Type.NonNull && (cfg.EnableModelJsonOmitemptyTag == nil || *cfg.EnableModelJsonOmitemptyTag) {
return `json:"` + field.Name + `,omitempty"`
}
return `json:"` + field.Name + `"`

View File

@ -0,0 +1,47 @@
package modelgen
import (
"go/types"
"strings"
)
// buildType constructs a types.Type for the given string (using the syntax
// from the extra field config Type field).
func buildType(typeString string) types.Type {
switch {
case typeString[0] == '*':
return types.NewPointer(buildType(typeString[1:]))
case strings.HasPrefix(typeString, "[]"):
return types.NewSlice(buildType(typeString[2:]))
default:
return buildNamedType(typeString)
}
}
// buildNamedType returns the specified named or builtin type.
//
// Note that we don't look up the full types.Type object from the appropriate
// package -- gqlgen doesn't give us the package-map we'd need to do so.
// Instead we construct a placeholder type that has all the fields gqlgen
// wants. This is roughly what gqlgen itself does, anyway:
// https://github.com/99designs/gqlgen/blob/master/plugin/modelgen/models.go#L119
func buildNamedType(fullName string) types.Type {
dotIndex := strings.LastIndex(fullName, ".")
if dotIndex == -1 { // builtinType
return types.Universe.Lookup(fullName).Type()
}
// type is pkg.Name
pkgPath := fullName[:dotIndex]
typeName := fullName[dotIndex+1:]
pkgName := pkgPath
slashIndex := strings.LastIndex(pkgPath, "/")
if slashIndex != -1 {
pkgName = pkgPath[slashIndex+1:]
}
pkg := types.NewPackage(pkgPath, pkgName)
// gqlgen doesn't use some of the fields, so we leave them 0/nil
return types.NewNamed(types.NewTypeName(0, pkg, typeName, nil), nil, nil)
}

View File

@ -5,3 +5,7 @@
vendor/
dist/
.env
bin/
*.php
test/*.go
*.txt

67
vendor/github.com/adhocore/gronx/.goreleaser.yml generated vendored Normal file
View File

@ -0,0 +1,67 @@
project_name: tasker
release:
prerelease: auto
name_template: "Version v{{.Version}}"
# draft: true
mode: "keep-existing"
before:
hooks:
- go mod tidy
builds:
- <<: &build_defaults
binary: bin/tasker
main: ./cmd/tasker
ldflags:
- -X main.Version={{.Version}}
env:
- CGO_ENABLED=0
id: macOS
goos: [darwin]
goarch: [amd64, arm64]
- <<: *build_defaults
id: linux
goos: [linux]
goarch: [386, arm, amd64, arm64]
- <<: *build_defaults
id: windows
goos: [windows]
goarch: [amd64]
archives:
- id: nix
builds: [macOS, linux]
<<: &archive_defaults
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
wrap_in_directory: true
rlcp: true
format: tar.gz
files:
- LICENSE
- id: windows
builds: [windows]
<<: *archive_defaults
wrap_in_directory: false
format: zip
files:
- LICENSE
checksum:
name_template: 'checksums.txt'
algorithm: sha256
changelog:
skip: true
use: github
sort: desc
filters:
exclude:
- '^doc:'
- '^dev:'
- '^build:'
- '^ci:'

View File

@ -6,16 +6,16 @@
[![Test](https://github.com/adhocore/gronx/actions/workflows/test-action.yml/badge.svg)](https://github.com/adhocore/gronx/actions/workflows/test-action.yml)
[![Lint](https://github.com/adhocore/gronx/actions/workflows/lint-action.yml/badge.svg)](https://github.com/adhocore/gronx/actions/workflows/lint-action.yml)
[![Codecov](https://img.shields.io/codecov/c/github/adhocore/gronx/main.svg?style=flat-square)](https://codecov.io/gh/adhocore/gronx)
[![Donate 15](https://img.shields.io/badge/donate-paypal-blue.svg?style=flat-square&label=donate+15)](https://www.paypal.me/ji10/15usd)
[![Donate 25](https://img.shields.io/badge/donate-paypal-blue.svg?style=flat-square&label=donate+25)](https://www.paypal.me/ji10/25usd)
[![Donate 50](https://img.shields.io/badge/donate-paypal-blue.svg?style=flat-square&label=donate+50)](https://www.paypal.me/ji10/50usd)
[![Support](https://img.shields.io/static/v1?label=Support&message=%E2%9D%A4&logo=GitHub)](https://github.com/sponsors/adhocore)
[![Tweet](https://img.shields.io/twitter/url/http/shields.io.svg?style=social)](https://twitter.com/intent/tweet?text=Lightweight+fast+and+deps+free+cron+expression+parser+for+Golang&url=https://github.com/adhocore/gronx&hashtags=go,golang,parser,cron,cronexpr,cronparser)
`gronx` is Golang cron expression parser ported from [adhocore/cron-expr](https://github.com/adhocore/php-cron-expr) with task runner
`gronx` is Golang [cron expression](#cron-expression) parser ported from [adhocore/cron-expr](https://github.com/adhocore/php-cron-expr) with task runner
and daemon that supports crontab like task list file. Use it programatically in Golang or as standalone binary instead of crond.
- Zero dependency.
- Very **fast** because it bails early in case a segment doesn't match.
- Built in crontab like daemon.
- Supports time granularity of Seconds.
Find gronx in [pkg.go.dev](https://pkg.go.dev/github.com/adhocore/gronx).
@ -47,9 +47,32 @@ gron.IsDue(expr) // true|false, nil
gron.IsDue(expr, time.Date(2021, time.April, 1, 1, 1, 0, 0, time.UTC)) // true|false, nil
```
### Batch Due Check
If you have multiple cron expressions to check due on same reference time use `BatchDue()`:
```go
gron := gronx.New()
exprs := []string{"* * * * *", "0 */5 * * * *"}
// gives []gronx.Expr{} array, each item has Due flag and Err encountered.
dues := gron.BatchDue(exprs)
for _, expr := range dues {
if expr.Err != nil {
// Handle err
} else if expr.Due {
// Handle due
}
}
// Or with given time
ref := time.Now()
gron.BatchDue(exprs, ref)
```
### Next Tick
To find out when is the cron due next (onwards):
To find out when is the cron due next (in near future):
```go
allowCurrent = true // includes current time as well
nextTime, err := gron.NextTick(expr, allowCurrent) // gives time.Time, error
@ -60,11 +83,26 @@ allowCurrent = false // excludes the ref time
nextTime, err := gron.NextTickAfter(expr, refTime, allowCurrent) // gives time.Time, error
```
### Prev Tick
To find out when was the cron due previously (in near past):
```go
allowCurrent = true // includes current time as well
prevTime, err := gron.PrevTick(expr, allowCurrent) // gives time.Time, error
// OR, prev tick before certain reference time
refTime = time.Date(2022, time.November, 1, 1, 1, 0, 0, time.UTC)
allowCurrent = false // excludes the ref time
nextTime, err := gron.PrevTickBefore(expr, refTime, allowCurrent) // gives time.Time, error
```
> The working of `PrevTick*()` and `NextTick*()` are mostly the same except the direction.
> They differ in lookback or lookahead.
### Standalone Daemon
In a more practical level, you would use this tool to manage and invoke jobs in app itself and not
mess around with `crontab` for each and every new tasks/jobs. ~~It doesn't yet replace that but rather supplements it.
There is a plan though [#1](https://github.com/adhocore/gronx/issues/1)~~.
mess around with `crontab` for each and every new tasks/jobs.
In crontab just put one entry with `* * * * *` which points to your Go entry point that uses this tool.
Then in that entry point you would invoke different tasks if the corresponding Cron expr is due.
@ -75,7 +113,7 @@ Check the section below for more sophisticated way of managing tasks automatical
---
### Go Tasker
Tasker is a task manager that can be programatically used in Golang applications. It runs as a daemon and and invokes tasks scheduled with cron expression:
Tasker is a task manager that can be programmatically used in Golang applications. It runs as a daemon and invokes tasks scheduled with cron expression:
```go
package main
@ -108,8 +146,12 @@ func main() {
return 0, nil
})
// run task without overlap, set concurrent flag to false:
concurrent := false
taskr.Task("* * * * * *", tasker.Taskify("sleep 2", tasker.Option{}), concurrent)
// every 10 minute with arbitrary command
taskr.Task("@10minutes", taskr.Taskify("command --option val -- args"))
taskr.Task("@10minutes", taskr.Taskify("command --option val -- args", tasker.Option{Shell: "/bin/sh -c"}))
// ... add more tasks
@ -122,14 +164,31 @@ func main() {
}
```
#### Concurrency
By default the tasks can run concurrently i.e if previous run is still not finished
but it is now due again, it will run again.
If you want to run only one instance of a task at a time, set concurrent flag to false:
```go
taskr := tasker.New(tasker.Option{})
concurrent := false
expr, task := "* * * * * *", tasker.Taskify("php -r 'sleep(2);'")
taskr.Task(expr, task, concurrent)
```
### Task Daemon
It can also be used as standalone task daemon instead of programmatic usage for Golang application.
First, just install tasker command:
```sh
go get -u github.com/adhocore/gronx/cmd/tasker
go install github.com/adhocore/gronx/cmd/tasker@latest
```
Or you can also download latest prebuilt binary from [release](https://github.com/adhocore/gronx/releases/latest) for platform of your choice.
Then prepare a taskfile ([example](./test/taskfile.txt)) in crontab format
(or can even point to existing crontab).
> `user` is not supported: it is just cron expr followed by the command.
@ -141,6 +200,7 @@ tasker -file path/to/taskfile
> You can pass more options to control the behavior of task daemon, see below.
#### Tasker command options:
```txt
-file string <required>
The task file in crontab format
@ -169,6 +229,7 @@ tasker -tz America/New_York -file path/to/taskfile -shell zsh # run all tasks us
> Same timezone applies for all tasks currently and it might support overriding timezone per task in future release.
#### Notes on Windows
In Windows if it doesn't find `bash.exe` or `git-bash.exe` it will use `powershell`.
`powershell` may not be compatible with Unix flavored commands. Also to note:
you can't do chaining with `cmd1 && cmd2` but rather `cmd1 ; cmd2`.
@ -176,29 +237,40 @@ you can't do chaining with `cmd1 && cmd2` but rather `cmd1 ; cmd2`.
---
### Cron Expression
Cron expression normally consists of 5 segments viz:
A complete cron expression consists of 7 segments viz:
```
<second> <minute> <hour> <day> <month> <weekday> <year>
```
However only 5 will do and this is most commonly used. 5 segments are interpreted as:
```
<minute> <hour> <day> <month> <weekday>
```
and sometimes there can be 6th segment for `<year>` at the end.
in which case a default value of 0 is prepended for `<second>` position.
For each segments you can have multiple choices separated by comma:
> Eg: `0,30 * * * *` means either 0th or 30th minute.
In a 6 segments expression, if 6th segment matches `<year>` (i.e 4 digits at least) it will be interpreted as:
```
<minute> <hour> <day> <month> <weekday> <year>
```
and a default value of 0 is prepended for `<second>` position.
To specify range of values you can use dash:
> Eg: `10-15 * * * *` means 10th, 11th, 12th, 13th, 14th and 15th minute.
For each segments you can have **multiple choices** separated by comma:
> Eg: `0 0,30 * * * *` means either 0th or 30th minute.
To specify range of step you can combine a dash and slash:
> Eg: `10-15/2 * * * *` means every 2 minutes between 10 and 15 i.e 10th, 12th and 14th minute.
To specify **range of values** you can use dash:
> Eg: `0 10-15 * * * *` means 10th, 11th, 12th, 13th, 14th and 15th minute.
For the 3rd and 5th segment, there are additional [modifiers](#modifiers) (optional).
To specify **range of step** you can combine a dash and slash:
> Eg: `0 10-15/2 * * * *` means every 2 minutes between 10 and 15 i.e 10th, 12th and 14th minute.
And if you want, you can mix them up:
> `5,12-20/4,55 * * * *` matches if any one of `5` or `12-20/4` or `55` matches the minute.
For the `<day>` and `<weekday>` segment, there are additional [**modifiers**](#modifiers) (optional).
And if you want, you can mix the multiple choices, ranges and steps in a single expression:
> `0 5,12-20/4,55 * * * *` matches if any one of `5` or `12-20/4` or `55` matches the minute.
### Real Abbreviations
You can use real abbreviations for month and week days. eg: `JAN`, `dec`, `fri`, `SUN`
You can use real abbreviations (3 chars) for month and week days. eg: `JAN`, `dec`, `fri`, `SUN`
### Tags
@ -214,8 +286,13 @@ Following tags are available and they are converted to real cron expressions bef
- *@15minutes* - every 15 minutes
- *@30minutes* - every 30 minutes
- *@always* - every minute
- *@everysecond* - every second
> For BC reasons, `@always` still means every minute for now, in future release it may mean every second instead.
```go
// Use tags like so:
gron.IsDue("@hourly")
gron.IsDue("@5minutes")
```
@ -223,10 +300,10 @@ gron.IsDue("@5minutes")
Following modifiers supported
- *Day of Month / 3rd segment:*
- *Day of Month / 3rd of 5 segments / 4th of 6+ segments:*
- `L` stands for last day of month (eg: `L` could mean 29th for February in leap year)
- `W` stands for closest week day (eg: `10W` is closest week days (MON-FRI) to 10th date)
- *Day of Week / 5th segment:*
- *Day of Week / 5th of 5 segments / 6th of 6+ segments:*
- `L` stands for last weekday of month (eg: `2L` is last monday)
- `#` stands for nth day of week in the month (eg: `1#2` is second sunday)
@ -242,9 +319,10 @@ release managed by [please](https://github.com/adhocore/please).
---
### Other projects
My other golang projects you might find interesting and useful:
- [**urlsh**](https://github.com/adhocore/urlsh) - URL shortener and bookmarker service with UI, API, Cache, Hits Counter and forwarder using postgres and redis in backend, bulma in frontend; has [web](https://urlssh.xyz) and cli client
- [**fast**](https://github.com/adhocore/fast) - Check your internet speed with ease and comfort right from the terminal
- [**goic**](https://github.com/adhocore/goic) - Go Open ID Connect, is OpenID connect client library for Golang, supports the Authorization Code Flow of OpenID Connect specification.
- [**chin**](https://github.com/adhocore/chin) - A GO lang command line tool to show a spinner as user waits for some long running jobs to finish.
- [**chin**](https://github.com/adhocore/chin) - A Go lang command line tool to show a spinner as user waits for some long running jobs to finish.

51
vendor/github.com/adhocore/gronx/batch.go generated vendored Normal file
View File

@ -0,0 +1,51 @@
package gronx
import (
"strings"
"time"
)
// Expr represents an item in array for batch check
type Expr struct {
Expr string
Due bool
Err error
}
// BatchDue checks if multiple expressions are due for given time (or now).
// It returns []Expr with filled in Due and Err values.
func (g *Gronx) BatchDue(exprs []string, ref ...time.Time) []Expr {
ref = append(ref, time.Now())
g.C.SetRef(ref[0])
var segs []string
cache, batch := map[string]Expr{}, make([]Expr, len(exprs))
for i := range exprs {
batch[i].Expr = exprs[i]
segs, batch[i].Err = Segments(exprs[i])
key := strings.Join(segs, " ")
if batch[i].Err != nil {
cache[key] = batch[i]
continue
}
if c, ok := cache[key]; ok {
batch[i] = c
batch[i].Expr = exprs[i]
continue
}
due := true
for pos, seg := range segs {
if seg != "*" && seg != "?" {
if due, batch[i].Err = g.C.CheckDue(seg, pos); !due || batch[i].Err != nil {
break
}
}
}
batch[i].Due = due
cache[key] = batch[i]
}
return batch
}

View File

@ -1,6 +1,7 @@
package gronx
import (
"fmt"
"strconv"
"strings"
"time"
@ -30,26 +31,25 @@ func (c *SegmentChecker) SetRef(ref time.Time) {
// CheckDue checks if the cron segment at given position is due.
// It returns bool or error if any.
func (c *SegmentChecker) CheckDue(segment string, pos int) (bool, error) {
func (c *SegmentChecker) CheckDue(segment string, pos int) (due bool, err error) {
ref, last := c.GetRef(), -1
val, loc := valueByPos(ref, pos), ref.Location()
isMonth, isWeekDay := pos == 3, pos == 5
for _, offset := range strings.Split(segment, ",") {
mod := pos == 2 || pos == 4
due, err := c.isOffsetDue(offset, val, pos)
if due || (!mod && err != nil) {
return due, err
mod := (isMonth || isWeekDay) && strings.ContainsAny(offset, "LW#")
if due, err = c.isOffsetDue(offset, val, pos); due || (!mod && err != nil) {
return
}
if mod && !strings.ContainsAny(offset, "LW#") {
if !mod {
continue
}
if last == -1 {
last = time.Date(ref.Year(), ref.Month(), 1, 0, 0, 0, 0, loc).AddDate(0, 1, 0).Add(-time.Nanosecond).Day()
}
if pos == 2 {
if isMonth {
due, err = isValidMonthDay(offset, last, ref)
} else if pos == 4 {
} else if isWeekDay {
due, err = isValidWeekDay(offset, last, ref)
}
if due || err != nil {
@ -64,17 +64,19 @@ func (c *SegmentChecker) isOffsetDue(offset string, val, pos int) (bool, error)
if offset == "*" || offset == "?" {
return true, nil
}
bounds, isWeekDay := boundsByPos(pos), pos == 5
if strings.Contains(offset, "/") {
return inStep(val, offset)
return inStep(val, offset, bounds)
}
if strings.Contains(offset, "-") {
if pos == 4 {
if isWeekDay {
offset = strings.Replace(offset, "7-", "0-", 1)
}
return inRange(val, offset)
return inRange(val, offset, bounds)
}
if pos != 4 && (val == 0 || offset == "0") {
if !isWeekDay && (val == 0 || offset == "0") {
return offset == "0" && val == 0, nil
}
@ -82,29 +84,48 @@ func (c *SegmentChecker) isOffsetDue(offset string, val, pos int) (bool, error)
if err != nil {
return false, err
}
if pos == 4 && nval == 7 {
nval = 0
if nval < bounds[0] || nval > bounds[1] {
return false, fmt.Errorf("segment#%d: '%s' out of bounds(%d, %d)", pos, offset, bounds[0], bounds[1])
}
return nval == val, nil
return nval == val || (isWeekDay && nval == 7 && val == 0), nil
}
func valueByPos(ref time.Time, pos int) int {
func valueByPos(ref time.Time, pos int) (val int) {
switch pos {
case 0:
return ref.Minute()
val = ref.Second()
case 1:
return ref.Hour()
val = ref.Minute()
case 2:
return ref.Day()
val = ref.Hour()
case 3:
return int(ref.Month())
val = ref.Day()
case 4:
return int(ref.Weekday())
val = int(ref.Month())
case 5:
return ref.Year()
val = int(ref.Weekday())
case 6:
val = ref.Year()
}
return 0
return
}
// boundsByPos returns the inclusive [min, max] value range allowed for the
// cron segment at the given position: 0=second, 1=minute, 2=hour,
// 3=day-of-month, 4=month, 5=weekday, 6=year. Any other position yields
// the neutral range [0, 0].
func boundsByPos(pos int) (bounds []int) {
	switch pos {
	case 0, 1: // second and minute share the same range
		return []int{0, 59}
	case 2: // hour
		return []int{0, 23}
	case 3: // day of month
		return []int{1, 31}
	case 4: // month
		return []int{1, 12}
	case 5: // weekday (7 is also accepted for Sunday)
		return []int{0, 7}
	case 6: // year
		return []int{1, 9999}
	}
	return []int{0, 0}
}

View File

@ -25,10 +25,13 @@ var expressions = map[string]string{
"@10minutes": "*/10 * * * *",
"@15minutes": "*/15 * * * *",
"@30minutes": "0,30 * * * *",
"@everysecond": "* * * * * *",
}
// SpaceRe is regex for whitespace.
var SpaceRe = regexp.MustCompile(`\s+`)
var yearRe = regexp.MustCompile(`\d{4}`)
func normalize(expr string) []string {
expr = strings.Trim(expr, " \t")
@ -55,11 +58,8 @@ func New() Gronx {
// IsDue checks if cron expression is due for given reference time (or now).
// It returns bool or error if any.
func (g *Gronx) IsDue(expr string, ref ...time.Time) (bool, error) {
if len(ref) > 0 {
g.C.SetRef(ref[0])
} else {
g.C.SetRef(time.Now())
}
ref = append(ref, time.Now())
g.C.SetRef(ref[0])
segs, err := Segments(expr)
if err != nil {
@ -69,12 +69,25 @@ func (g *Gronx) IsDue(expr string, ref ...time.Time) (bool, error) {
return g.SegmentsDue(segs)
}
// isDue reports whether expr is due at ref, treating any parse or
// check error as simply "not due".
func (g *Gronx) isDue(expr string, ref time.Time) bool {
	if due, err := g.IsDue(expr, ref); err == nil {
		return due
	}
	return false
}
// Segments splits expr into array array of cron parts.
// If expression contains 5 parts or 6th part is year like, it prepends a second.
// It returns array or error.
func Segments(expr string) ([]string, error) {
segs := normalize(expr)
if len(segs) < 5 || len(segs) > 6 {
return []string{}, errors.New("expr should contain 5-6 segments separated by space")
slen := len(segs)
if slen < 5 || slen > 7 {
return []string{}, errors.New("expr should contain 5-7 segments separated by space")
}
// Prepend second if required
prepend := slen == 5 || (slen == 6 && yearRe.MatchString(segs[5]))
if prepend {
segs = append([]string{"0"}, segs...)
}
return segs, nil
@ -82,8 +95,8 @@ func Segments(expr string) ([]string, error) {
// SegmentsDue checks if all cron parts are due.
// It returns bool. You should use IsDue(expr) instead.
func (g *Gronx) SegmentsDue(segments []string) (bool, error) {
for pos, seg := range segments {
func (g *Gronx) SegmentsDue(segs []string) (bool, error) {
for pos, seg := range segs {
if seg == "*" || seg == "?" {
continue
}
@ -99,7 +112,17 @@ func (g *Gronx) SegmentsDue(segments []string) (bool, error) {
// IsValid checks if cron expression is valid.
// It returns bool.
func (g *Gronx) IsValid(expr string) bool {
_, err := g.IsDue(expr)
segs, err := Segments(expr)
if err != nil {
return false
}
return err == nil
g.C.SetRef(time.Now())
for pos, seg := range segs {
if _, err := g.C.CheckDue(seg, pos); err != nil {
return false
}
}
return true
}

View File

@ -22,58 +22,64 @@ func NextTick(expr string, inclRefTime bool) (time.Time, error) {
// NextTickAfter gives next run time from the provided time.Time
func NextTickAfter(expr string, start time.Time, inclRefTime bool) (time.Time, error) {
gron, next := New(), start.Truncate(time.Minute)
gron, next := New(), start.Truncate(time.Second)
due, err := gron.IsDue(expr, start)
if err != nil || (due && inclRefTime) {
return start, err
}
segments, _ := Segments(expr)
if len(segments) > 5 && isPastYear(segments[5], next, inclRefTime) {
return next, fmt.Errorf("unreachable year segment: %s", segments[5])
if len(segments) > 6 && isUnreachableYear(segments[6], next, inclRefTime, false) {
return next, fmt.Errorf("unreachable year segment: %s", segments[6])
}
if next, err = loop(gron, segments, next, inclRefTime); err != nil {
// Ignore superfluous err
if due, _ = gron.IsDue(expr, next); due {
err = nil
}
next, err = loop(gron, segments, next, inclRefTime, false)
// Ignore superfluous err
if err != nil && gron.isDue(expr, next) {
err = nil
}
return next, err
}
func loop(gron Gronx, segments []string, start time.Time, incl bool) (next time.Time, err error) {
iter, next, bumped := 1000, start, false
func loop(gron Gronx, segments []string, start time.Time, incl bool, reverse bool) (next time.Time, err error) {
iter, next, bumped := 500, start, false
over:
for iter > 0 {
over:
iter--
for pos, seg := range segments {
if seg == "*" || seg == "?" {
continue
}
if next, bumped, err = bumpUntilDue(gron.C, seg, pos, next); bumped {
if next, bumped, err = bumpUntilDue(gron.C, seg, pos, next, reverse); bumped {
goto over
}
}
if !incl && next.Format(CronDateFormat) == start.Format(CronDateFormat) {
next, _, err = bumpUntilDue(gron.C, segments[0], 0, next.Add(time.Minute))
if !incl && next.Format(FullDateFormat) == start.Format(FullDateFormat) {
delta := time.Second
if reverse {
delta = -time.Second
}
next, _, err = bumpUntilDue(gron.C, segments[0], 0, next.Add(delta), reverse)
continue
}
return next, err
return
}
return start, errors.New("tried so hard")
}
var dashRe = regexp.MustCompile(`/.*$`)
func isPastYear(year string, ref time.Time, incl bool) bool {
func isUnreachableYear(year string, ref time.Time, incl bool, reverse bool) bool {
if year == "*" || year == "?" {
return false
}
min := ref.Year()
edge, inc := ref.Year(), 1
if !incl {
min++
if reverse {
inc = -1
}
edge += inc
}
for _, offset := range strings.Split(year, ",") {
if strings.Index(offset, "*/") == 0 || strings.Index(offset, "0/") == 0 {
@ -81,7 +87,7 @@ func isPastYear(year string, ref time.Time, incl bool) bool {
}
for _, part := range strings.Split(dashRe.ReplaceAllString(offset, ""), "-") {
val, err := strconv.Atoi(part)
if err != nil || val >= min {
if err != nil || (!reverse && val >= edge) || (reverse && val < edge) {
return false
}
}
@ -89,34 +95,41 @@ func isPastYear(year string, ref time.Time, incl bool) bool {
return true
}
var limit = map[int]int{0: 60, 1: 24, 2: 31, 3: 12, 4: 366, 5: 100}
var limit = map[int]int{0: 60, 1: 60, 2: 24, 3: 31, 4: 12, 5: 366, 6: 100}
func bumpUntilDue(c Checker, segment string, pos int, ref time.Time) (time.Time, bool, error) {
// <minute> <hour> <day> <month> <weekday>
func bumpUntilDue(c Checker, segment string, pos int, ref time.Time, reverse bool) (time.Time, bool, error) {
// <second> <minute> <hour> <day> <month> <weekday> <year>
iter := limit[pos]
for iter > 0 {
c.SetRef(ref)
if ok, _ := c.CheckDue(segment, pos); ok {
return ref, iter != limit[pos], nil
}
ref = bump(ref, pos)
ref = bump(ref, pos, reverse)
iter--
}
return ref, false, errors.New("tried so hard")
}
func bump(ref time.Time, pos int) time.Time {
func bump(ref time.Time, pos int, reverse bool) time.Time {
factor := 1
if reverse {
factor = -1
}
switch pos {
case 0:
ref = ref.Add(time.Minute)
ref = ref.Add(time.Duration(factor) * time.Second)
case 1:
ref = ref.Add(time.Hour)
case 2, 4:
ref = ref.AddDate(0, 0, 1)
case 3:
ref = ref.AddDate(0, 1, 0)
case 5:
ref = ref.AddDate(1, 0, 0)
ref = ref.Add(time.Duration(factor) * time.Minute)
case 2:
ref = ref.Add(time.Duration(factor) * time.Hour)
case 3, 5:
ref = ref.AddDate(0, 0, factor)
case 4:
ref = ref.AddDate(0, factor, 0)
case 6:
ref = ref.AddDate(factor, 0, 0)
}
return ref
}

32
vendor/github.com/adhocore/gronx/prev.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
package gronx
import (
"fmt"
"time"
)
// PrevTick gives previous run time before now
func PrevTick(expr string, inclRefTime bool) (time.Time, error) {
return PrevTickBefore(expr, time.Now(), inclRefTime)
}
// PrevTickBefore gives previous run time before given reference time
func PrevTickBefore(expr string, start time.Time, inclRefTime bool) (time.Time, error) {
gron, prev := New(), start.Truncate(time.Second)
due, err := gron.IsDue(expr, start)
if err != nil || (due && inclRefTime) {
return prev, err
}
segments, _ := Segments(expr)
if len(segments) > 6 && isUnreachableYear(segments[6], prev, inclRefTime, true) {
return prev, fmt.Errorf("unreachable year segment: %s", segments[6])
}
prev, err = loop(gron, segments, prev, inclRefTime, true)
// Ignore superfluous err
if err != nil && gron.isDue(expr, prev) {
err = nil
}
return prev, err
}

View File

@ -2,12 +2,13 @@ package gronx
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
)
func inStep(val int, s string) (bool, error) {
func inStep(val int, s string, bounds []int) (bool, error) {
parts := strings.Split(s, "/")
step, err := strconv.Atoi(parts[1])
if err != nil {
@ -34,10 +35,14 @@ func inStep(val int, s string) (bool, error) {
}
}
if (len(sub) > 1 && end < start) || start < bounds[0] || end > bounds[1] {
return false, fmt.Errorf("step '%s' out of bounds(%d, %d)", parts[0], bounds[0], bounds[1])
}
return inStepRange(val, start, end, step), nil
}
func inRange(val int, s string) (bool, error) {
func inRange(val int, s string, bounds []int) (bool, error) {
parts := strings.Split(s, "-")
start, err := strconv.Atoi(parts[0])
if err != nil {
@ -49,6 +54,10 @@ func inRange(val int, s string) (bool, error) {
return false, err
}
if end < start || start < bounds[0] || end > bounds[1] {
return false, fmt.Errorf("range '%s' out of bounds(%d, %d)", s, bounds[0], bounds[1])
}
return start <= val && val <= end, nil
}
@ -61,7 +70,7 @@ func inStepRange(val, start, end, step int) bool {
return false
}
func isValidMonthDay(val string, last int, ref time.Time) (bool, error) {
func isValidMonthDay(val string, last int, ref time.Time) (valid bool, err error) {
day, loc := ref.Day(), ref.Location()
if val == "L" {
return day == last, nil
@ -84,12 +93,13 @@ func isValidMonthDay(val string, last int, ref time.Time) (bool, error) {
week := int(iref.Weekday())
if week > 0 && week < 6 && iref.Month() == ref.Month() {
return day == iref.Day(), nil
valid = day == iref.Day()
break
}
}
}
return false, nil
return valid, nil
}
func isValidWeekDay(val string, last int, ref time.Time) (bool, error) {

View File

@ -238,16 +238,17 @@ if err != nil {
For more control (particularly, if you need a different way of managing each certificate), you'll make and use a `Cache` and a `Config` like so:
```go
cache := certmagic.NewCache(certmagic.CacheOptions{
// First make a pointer to a Cache as we need to reference the same Cache in
// GetConfigForCert below.
var cache *certmagic.Cache
cache = certmagic.NewCache(certmagic.CacheOptions{
GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) {
// do whatever you need to do to get the right
// configuration for this certificate; keep in
// mind that this config value is used as a
// template, and will be completed with any
// defaults that are set in the Default config
return &certmagic.Config{
// Here we use New to get a valid Config associated with the same cache.
// The provided Config is used as a template and will be completed with
// any defaults that are set in the Default config.
return certmagic.New(cache, &certmagic.Config{
// ...
}, nil
}), nil
},
...
})

View File

@ -115,6 +115,10 @@ type ACMEIssuer struct {
// desired, set this to zap.NewNop().
Logger *zap.Logger
// Set a http proxy to use when issuing a certificate.
// Default is http.ProxyFromEnvironment
HTTPProxy func(*http.Request) (*url.URL, error)
config *Config
httpClient *http.Client
@ -204,6 +208,13 @@ func NewACMEIssuer(cfg *Config, template ACMEIssuer) *ACMEIssuer {
template.Logger = defaultLogger
}
if template.HTTPProxy == nil {
template.HTTPProxy = DefaultACME.HTTPProxy
}
if template.HTTPProxy == nil {
template.HTTPProxy = http.ProxyFromEnvironment
}
template.config = cfg
template.mu = new(sync.Mutex)
@ -223,7 +234,7 @@ func NewACMEIssuer(cfg *Config, template ACMEIssuer) *ACMEIssuer {
}
}
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Proxy: template.HTTPProxy,
DialContext: dialer.DialContext,
TLSHandshakeTimeout: 30 * time.Second, // increase to 30s requested in #175
ResponseHeaderTimeout: 30 * time.Second, // increase to 30s requested in #175
@ -506,9 +517,10 @@ type ChainPreference struct {
// DefaultACME specifies default settings to use for ACMEIssuers.
// Using this value is optional but can be convenient.
var DefaultACME = ACMEIssuer{
CA: LetsEncryptProductionCA,
TestCA: LetsEncryptStagingCA,
Logger: defaultLogger,
CA: LetsEncryptProductionCA,
TestCA: LetsEncryptStagingCA,
Logger: defaultLogger,
HTTPProxy: http.ProxyFromEnvironment,
}
// Some well-known CA endpoints available to use.

View File

@ -145,7 +145,8 @@ type CacheOptions struct {
// used for managing a certificate, or for accessing
// that certificate's asset storage (e.g. for
// OCSP staples, etc). The returned Config MUST
// be associated with the same Cache as the caller.
// be associated with the same Cache as the caller,
// use New to obtain a valid Config.
//
// The reason this is a callback function, dynamically
// returning a Config (instead of attaching a static
@ -197,14 +198,29 @@ func (certCache *Cache) cacheCertificate(cert Certificate) {
// This function is NOT safe for concurrent use. Callers MUST acquire
// a write lock on certCache.mu first.
func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
// no-op if this certificate already exists in the cache
if _, ok := certCache.cache[cert.hash]; ok {
certCache.logger.Debug("certificate already cached",
// if this certificate already exists in the cache, this is basically
// a no-op so we reuse existing cert (prevent duplication), but we do
// modify the cert to add tags it may be missing (see issue #211)
if existingCert, ok := certCache.cache[cert.hash]; ok {
logMsg := "certificate already cached"
if len(cert.Tags) > 0 {
for _, tag := range cert.Tags {
if !existingCert.HasTag(tag) {
existingCert.Tags = append(existingCert.Tags, tag)
}
}
certCache.cache[cert.hash] = existingCert
logMsg += "; appended any missing tags to cert"
}
certCache.logger.Debug(logMsg,
zap.Strings("subjects", cert.Names),
zap.Time("expiration", expiresAt(cert.Leaf)),
zap.Bool("managed", cert.managed),
zap.String("issuer_key", cert.issuerKey),
zap.String("hash", cert.hash))
zap.String("hash", cert.hash),
zap.Strings("tags", cert.Tags))
return
}
@ -327,7 +343,11 @@ func (certCache *Cache) getConfig(cert Certificate) (*Config, error) {
if err != nil {
return nil, err
}
if cfg.certCache != nil && cfg.certCache != certCache {
if cfg.certCache == nil {
return nil, fmt.Errorf("config returned for certificate %v has nil cache; expected %p (this one)",
cert.Names, certCache)
}
if cfg.certCache != certCache {
return nil, fmt.Errorf("config returned for certificate %v is not nil and points to different cache; got %p, expected %p (this one)",
cert.Names, cfg.certCache, certCache)
}

View File

@ -113,6 +113,9 @@ func (cert Certificate) HasTag(tag string) bool {
// resolution of ASN.1 UTCTime/GeneralizedTime by including the extra fraction
// of a second of certificate validity beyond the NotAfter value.
func expiresAt(cert *x509.Certificate) time.Time {
if cert == nil {
return time.Time{}
}
return cert.NotAfter.Truncate(time.Second).Add(1 * time.Second)
}

View File

@ -270,7 +270,16 @@ type OnDemandConfig struct {
// request will be denied.
DecisionFunc func(name string) error
// List of whitelisted hostnames (SNI values) for
// Sources for getting new, unmanaged certificates.
// They will be invoked only during TLS handshakes
// before on-demand certificate management occurs,
// for certificates that are not already loaded into
// the in-memory cache.
//
// TODO: EXPERIMENTAL: subject to change and/or removal.
Managers []Manager
// List of allowed hostnames (SNI values) for
// deferred (on-demand) obtaining of certificates.
// Used only by higher-level functions in this
// package to persist the list of hostnames that
@ -282,15 +291,15 @@ type OnDemandConfig struct {
// for higher-level convenience functions to be
// able to retain their convenience (alternative
// is: the user manually creates a DecisionFunc
// that whitelists the same names it already
// passed into Manage) and without letting clients
// have their run of any domain names they want.
// that allows the same names it already passed
// into Manage) and without letting clients have
// their run of any domain names they want.
// Only enforced if len > 0.
hostWhitelist []string
hostAllowlist []string
}
func (o *OnDemandConfig) whitelistContains(name string) bool {
for _, n := range o.hostWhitelist {
func (o *OnDemandConfig) allowlistContains(name string) bool {
for _, n := range o.hostAllowlist {
if strings.EqualFold(n, name) {
return true
}
@ -433,7 +442,7 @@ type CertificateResource struct {
// The unique string identifying the issuer of the
// certificate; internally useful for storage access.
issuerKey string `json:"-"`
issuerKey string
}
// NamesKey returns the list of SANs as a single string,

View File

@ -72,6 +72,13 @@ type Config struct {
// ClientHello's ServerName field is empty.
DefaultServerName string
// FallbackServerName specifies a server name
// to use when choosing a certificate if the
// ClientHello's ServerName field doesn't match
// any available certificate.
// EXPERIMENTAL: Subject to change or removal.
FallbackServerName string
// The state needed to operate on-demand TLS;
// if non-nil, on-demand TLS is enabled and
// certificate operations are deferred to
@ -88,15 +95,6 @@ type Config struct {
// turn until one succeeds.
Issuers []Issuer
// Sources for getting new, unmanaged certificates.
// They will be invoked only during TLS handshakes
// before on-demand certificate management occurs,
// for certificates that are not already loaded into
// the in-memory cache.
//
// TODO: EXPERIMENTAL: subject to change and/or removal.
Managers []Manager
// The source of new private keys for certificates;
// the default KeySource is StandardKeyGenerator.
KeySource KeyGenerator
@ -119,6 +117,16 @@ type Config struct {
// TLS assets. Default is the local file system.
Storage Storage
// CertMagic will verify the storage configuration
// is acceptable before obtaining a certificate
// to avoid information loss after an expensive
// operation. If you are absolutely 100% sure your
// storage is properly configured and has sufficient
// space, you can disable this check to reduce I/O
// if that is expensive for you.
// EXPERIMENTAL: Option subject to change or removal.
DisableStorageCheck bool
// Set a logger to enable logging. If not set,
// a default logger will be created.
Logger *zap.Logger
@ -162,6 +170,7 @@ func NewDefault() *Config {
GetConfigForCert: func(Certificate) (*Config, error) {
return NewDefault(), nil
},
Logger: Default.Logger,
})
}
certCache := defaultCache
@ -209,10 +218,10 @@ func newWithCache(certCache *Cache, cfg Config) *Config {
if !cfg.MustStaple {
cfg.MustStaple = Default.MustStaple
}
if len(cfg.Issuers) == 0 {
if cfg.Issuers == nil {
cfg.Issuers = Default.Issuers
if len(cfg.Issuers) == 0 {
// at least one issuer is absolutely required
if cfg.Issuers == nil {
// at least one issuer is absolutely required if not nil
cfg.Issuers = []Issuer{NewACMEIssuer(&cfg, DefaultACME)}
}
}
@ -228,6 +237,9 @@ func newWithCache(certCache *Cache, cfg Config) *Config {
if cfg.DefaultServerName == "" {
cfg.DefaultServerName = Default.DefaultServerName
}
if cfg.FallbackServerName == "" {
cfg.FallbackServerName = Default.FallbackServerName
}
if cfg.Storage == nil {
cfg.Storage = Default.Storage
}
@ -329,12 +341,13 @@ func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bo
for _, domainName := range domainNames {
// if on-demand is configured, defer obtain and renew operations
if cfg.OnDemand != nil {
if !cfg.OnDemand.whitelistContains(domainName) {
cfg.OnDemand.hostWhitelist = append(cfg.OnDemand.hostWhitelist, domainName)
if !cfg.OnDemand.allowlistContains(domainName) {
cfg.OnDemand.hostAllowlist = append(cfg.OnDemand.hostAllowlist, domainName)
}
continue
}
// TODO: consider doing this in a goroutine if async, to utilize multiple cores while loading certs
// otherwise, begin management immediately
err := cfg.manageOne(ctx, domainName, async)
if err != nil {
@ -587,6 +600,7 @@ func (cfg *Config) obtainCert(ctx context.Context, name string, interactive bool
CertificatePEM: issuedCert.Certificate,
PrivateKeyPEM: privKeyPEM,
IssuerData: issuedCert.Metadata,
issuerKey: issuerUsed.IssuerKey(),
}
err = cfg.saveCertResource(ctx, issuerUsed, certRes)
if err != nil {
@ -812,6 +826,7 @@ func (cfg *Config) renewCert(ctx context.Context, name string, force, interactiv
CertificatePEM: issuedCert.Certificate,
PrivateKeyPEM: certRes.PrivateKeyPEM,
IssuerData: issuedCert.Metadata,
issuerKey: issuerUsed.IssuerKey(),
}
err = cfg.saveCertResource(ctx, issuerUsed, newCertRes)
if err != nil {
@ -1000,6 +1015,9 @@ func (cfg *Config) getChallengeInfo(ctx context.Context, identifier string) (Cha
// comparing the loaded value. If this fails, the provided
// cfg.Storage mechanism should not be used.
func (cfg *Config) checkStorage(ctx context.Context) error {
if cfg.DisableStorageCheck {
return nil
}
key := fmt.Sprintf("rw_test_%d", weakrand.Int())
contents := make([]byte, 1024*10) // size sufficient for one or two ACME resources
_, err := weakrand.Read(contents)

View File

@ -237,7 +237,7 @@ func checkDNSPropagation(fqdn, value string, resolvers []string) (bool, error) {
// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record.
func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) {
for _, ns := range nameservers {
r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false)
r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, true)
if err != nil {
return false, err
}

View File

@ -42,9 +42,15 @@ import (
// 5. Issuers (if on-demand is enabled)
//
// This method is safe for use as a tls.Config.GetCertificate callback.
//
// GetCertificate runs in a new (background) context; use
// GetCertificateWithContext to provide your own context.
func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
	ctx := context.TODO() // TODO: get a proper context? from somewhere...
	return cfg.GetCertificateWithContext(ctx, clientHello)
}
func (cfg *Config) GetCertificateWithContext(ctx context.Context, clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
if err := cfg.emit(ctx, "tls_get_certificate", map[string]any{"client_hello": clientHello}); err != nil {
cfg.Logger.Error("TLS handshake aborted by event handler",
zap.String("server_name", clientHello.ServerName),
@ -60,6 +66,7 @@ func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certif
challengeCert, distributed, err := cfg.getTLSALPNChallengeCert(clientHello)
if err != nil {
cfg.Logger.Error("tls-alpn challenge",
zap.String("remote_addr", clientHello.Conn.RemoteAddr().String()),
zap.String("server_name", clientHello.ServerName),
zap.Error(err))
return nil, err
@ -74,7 +81,7 @@ func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certif
}
// get the certificate and serve it up
cert, err := cfg.getCertDuringHandshake(ctx, clientHello, true, true)
cert, err := cfg.getCertDuringHandshake(ctx, clientHello, true)
return &cert.Certificate, err
}
@ -136,6 +143,20 @@ func (cfg *Config) getCertificateFromCache(hello *tls.ClientHelloInfo) (cert Cer
}
}
// a fallback server name can be tried in the very niche
// case where a client sends one SNI value but expects or
// accepts a different one in return (this is sometimes
// the case with CDNs like Cloudflare that send the
// downstream ServerName in the handshake but accept
// the backend origin's true hostname in a cert).
if cfg.FallbackServerName != "" {
normFallback := normalizedName(cfg.FallbackServerName)
cert, defaulted = cfg.selectCert(hello, normFallback)
if defaulted {
return
}
}
// otherwise, we're bingo on ammo; see issues
// caddyserver/caddy#2035 and caddyserver/caddy#1303 (any
// change to certificate matching behavior must
@ -232,18 +253,19 @@ func DefaultCertificateSelector(hello *tls.ClientHelloInfo, choices []Certificat
// An error will be returned if and only if no certificate is available.
//
// This function is safe for concurrent use.
func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.ClientHelloInfo, loadIfNecessary, obtainIfNecessary bool) (Certificate, error) {
log := logWithRemote(cfg.Logger.Named("handshake"), hello)
func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.ClientHelloInfo, loadOrObtainIfNecessary bool) (Certificate, error) {
logger := logWithRemote(cfg.Logger.Named("handshake"), hello)
name := cfg.getNameFromClientHello(hello)
// First check our in-memory cache to see if we've already loaded it
cert, matched, defaulted := cfg.getCertificateFromCache(hello)
if matched {
log.Debug("matched certificate in cache",
logger.Debug("matched certificate in cache",
zap.Strings("subjects", cert.Names),
zap.Bool("managed", cert.managed),
zap.Time("expiration", expiresAt(cert.Leaf)),
zap.String("hash", cert.hash))
if cert.managed && cfg.OnDemand != nil && obtainIfNecessary {
if cert.managed && cfg.OnDemand != nil && loadOrObtainIfNecessary {
// On-demand certificates are maintained in the background, but
// maintenance is triggered by handshakes instead of by a timer
// as in maintain.go.
@ -252,9 +274,52 @@ func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.Client
return cert, nil
}
// By this point, we need to load or obtain a certificate. If a swarm of requests comes in for the same
// domain, avoid pounding manager or storage thousands of times simultaneously. We do a similar sync
// strategy for obtaining certificate during handshake.
certLoadWaitChansMu.Lock()
wait, ok := certLoadWaitChans[name]
if ok {
// another goroutine is already loading the cert; just wait and we'll get it from the in-memory cache
certLoadWaitChansMu.Unlock()
timeout := time.NewTimer(2 * time.Minute) // TODO: have Caddy use the context param to establish a timeout
select {
case <-timeout.C:
return Certificate{}, fmt.Errorf("timed out waiting to load certificate for %s", name)
case <-ctx.Done():
timeout.Stop()
return Certificate{}, ctx.Err()
case <-wait:
timeout.Stop()
}
return cfg.getCertDuringHandshake(ctx, hello, false)
} else {
// no other goroutine is currently trying to load this cert
wait = make(chan struct{})
certLoadWaitChans[name] = wait
certLoadWaitChansMu.Unlock()
// unblock others and clean up when we're done
defer func() {
certLoadWaitChansMu.Lock()
close(wait)
delete(certLoadWaitChans, name)
certLoadWaitChansMu.Unlock()
}()
}
// Make sure a certificate is allowed for the given name. If not, it doesn't
// make sense to try loading one from storage (issue #185), getting it from a
// certificate manager, or obtaining one from an issuer.
if err := cfg.checkIfCertShouldBeObtained(name, false); err != nil {
return Certificate{}, fmt.Errorf("certificate is not allowed for server name %s: %v", name, err)
}
// If an external Manager is configured, try to get it from them.
// Only continue to use our own logic if it returns empty+nil.
externalCert, err := cfg.getCertFromAnyCertManager(ctx, hello, log)
externalCert, err := cfg.getCertFromAnyCertManager(ctx, hello, logger)
if err != nil {
return Certificate{}, err
}
@ -262,8 +327,6 @@ func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.Client
return externalCert, nil
}
name := cfg.getNameFromClientHello(hello)
// We might be able to load or obtain a needed certificate. Load from
// storage if OnDemand is enabled, or if there is the possibility that
// a statically-managed cert was evicted from a full cache.
@ -282,42 +345,25 @@ func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.Client
cacheAlmostFull := cacheCapacity > 0 && float64(cacheSize) >= cacheCapacity*.9
loadDynamically := cfg.OnDemand != nil || cacheAlmostFull
if loadDynamically && loadIfNecessary {
// Then check to see if we have one on disk
// TODO: As suggested here, https://caddy.community/t/error-tls-alert-internal-error-592-again/13272/30?u=matt,
// it might be a good idea to check with the DecisionFunc or allowlist first before even loading the certificate
// from storage, since if we can't renew it, why should we even try serving it (it will just get evicted after
// we get a return value of false anyway)? See issue #174
loadedCert, err := cfg.CacheManagedCertificate(ctx, name)
if errors.Is(err, fs.ErrNotExist) {
// If no exact match, try a wildcard variant, which is something we can still use
labels := strings.Split(name, ".")
labels[0] = "*"
loadedCert, err = cfg.CacheManagedCertificate(ctx, strings.Join(labels, "."))
}
if loadDynamically && loadOrObtainIfNecessary {
// Check to see if we have one on disk
loadedCert, err := cfg.loadCertFromStorage(ctx, logger, hello)
if err == nil {
log.Debug("loaded certificate from storage",
zap.Strings("subjects", loadedCert.Names),
zap.Bool("managed", loadedCert.managed),
zap.Time("expiration", expiresAt(loadedCert.Leaf)),
zap.String("hash", loadedCert.hash))
loadedCert, err = cfg.handshakeMaintenance(ctx, hello, loadedCert)
if err != nil {
log.Error("maintaining newly-loaded certificate",
zap.String("server_name", name),
zap.Error(err))
}
return loadedCert, nil
}
if cfg.OnDemand != nil && obtainIfNecessary {
logger.Debug("did not load cert from storage",
zap.String("server_name", hello.ServerName),
zap.Error(err))
if cfg.OnDemand != nil {
// By this point, we need to ask the CA for a certificate
return cfg.obtainOnDemandCertificate(ctx, hello)
}
return loadedCert, nil
}
// Fall back to the default certificate if there is one
// Fall back to another certificate if there is one (either DefaultServerName or FallbackServerName)
if defaulted {
log.Debug("fell back to default certificate",
logger.Debug("fell back to default certificate",
zap.Strings("subjects", cert.Names),
zap.Bool("managed", cert.managed),
zap.Time("expiration", expiresAt(cert.Leaf)),
@ -325,19 +371,44 @@ func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.Client
return cert, nil
}
log.Debug("no certificate matching TLS ClientHello",
logger.Debug("no certificate matching TLS ClientHello",
zap.String("server_name", hello.ServerName),
zap.String("remote", hello.Conn.RemoteAddr().String()),
zap.String("identifier", name),
zap.Uint16s("cipher_suites", hello.CipherSuites),
zap.Float64("cert_cache_fill", float64(cacheSize)/cacheCapacity), // may be approximate! because we are not within the lock
zap.Bool("load_if_necessary", loadIfNecessary),
zap.Bool("obtain_if_necessary", obtainIfNecessary),
zap.Bool("load_or_obtain_if_necessary", loadOrObtainIfNecessary),
zap.Bool("on_demand", cfg.OnDemand != nil))
return Certificate{}, fmt.Errorf("no certificate available for '%s'", name)
}
// loadCertFromStorage loads the certificate for hello.ServerName from storage
// into the in-memory cache, trying an exact name match first and then a
// wildcard variant (first label replaced by "*"). After loading, it runs
// handshakeMaintenance on the certificate; a maintenance error is logged but
// not returned, and the loaded certificate is returned regardless.
func (cfg *Config) loadCertFromStorage(ctx context.Context, logger *zap.Logger, hello *tls.ClientHelloInfo) (Certificate, error) {
	name := normalizedName(hello.ServerName)
	loadedCert, err := cfg.CacheManagedCertificate(ctx, name)
	if errors.Is(err, fs.ErrNotExist) {
		// If no exact match, try a wildcard variant, which is something we can still use
		labels := strings.Split(name, ".")
		labels[0] = "*"
		loadedCert, err = cfg.CacheManagedCertificate(ctx, strings.Join(labels, "."))
	}
	if err != nil {
		return Certificate{}, fmt.Errorf("no matching certificate to load for %s: %w", name, err)
	}
	logger.Debug("loaded certificate from storage",
		zap.Strings("subjects", loadedCert.Names),
		zap.Bool("managed", loadedCert.managed),
		zap.Time("expiration", expiresAt(loadedCert.Leaf)),
		zap.String("hash", loadedCert.hash))
	// Maintenance failure is non-fatal here: log it and serve what we loaded.
	loadedCert, err = cfg.handshakeMaintenance(ctx, hello, loadedCert)
	if err != nil {
		logger.Error("maintaining newly-loaded certificate",
			zap.String("server_name", name),
			zap.Error(err))
	}
	return loadedCert, nil
}
// optionalMaintenance will perform maintenance on the certificate (if necessary) and
// will return the resulting certificate. This should only be done if the certificate
// is managed, OnDemand is enabled, and the scope is allowed to obtain certificates.
@ -363,19 +434,23 @@ func (cfg *Config) optionalMaintenance(ctx context.Context, log *zap.Logger, cer
// checkIfCertShouldBeObtained checks to see if an on-demand TLS certificate
// should be obtained for a given domain based upon the config settings. If
// a non-nil error is returned, do not issue a new certificate for name.
func (cfg *Config) checkIfCertShouldBeObtained(name string) error {
if cfg.OnDemand == nil {
func (cfg *Config) checkIfCertShouldBeObtained(name string, requireOnDemand bool) error {
if requireOnDemand && cfg.OnDemand == nil {
return fmt.Errorf("not configured for on-demand certificate issuance")
}
if !SubjectQualifiesForCert(name) {
return fmt.Errorf("subject name does not qualify for certificate: %s", name)
}
if cfg.OnDemand.DecisionFunc != nil {
return cfg.OnDemand.DecisionFunc(name)
}
if len(cfg.OnDemand.hostWhitelist) > 0 &&
!cfg.OnDemand.whitelistContains(name) {
return fmt.Errorf("certificate for '%s' is not managed", name)
if cfg.OnDemand != nil {
if cfg.OnDemand.DecisionFunc != nil {
if err := cfg.OnDemand.DecisionFunc(name); err != nil {
return fmt.Errorf("decision func: %w", err)
}
return nil
}
if len(cfg.OnDemand.hostAllowlist) > 0 && !cfg.OnDemand.allowlistContains(name) {
return fmt.Errorf("certificate for '%s' is not managed", name)
}
}
return nil
}
@ -390,11 +465,6 @@ func (cfg *Config) obtainOnDemandCertificate(ctx context.Context, hello *tls.Cli
name := cfg.getNameFromClientHello(hello)
getCertWithoutReobtaining := func() (Certificate, error) {
// very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely
return cfg.getCertDuringHandshake(ctx, hello, true, false)
}
// We must protect this process from happening concurrently, so synchronize.
obtainCertWaitChansMu.Lock()
wait, ok := obtainCertWaitChans[name]
@ -412,7 +482,7 @@ func (cfg *Config) obtainOnDemandCertificate(ctx context.Context, hello *tls.Cli
timeout.Stop()
}
return getCertWithoutReobtaining()
return cfg.loadCertFromStorage(ctx, log, hello)
}
// looks like it's up to us to do all the work and obtain the cert.
@ -428,13 +498,6 @@ func (cfg *Config) obtainOnDemandCertificate(ctx context.Context, hello *tls.Cli
obtainCertWaitChansMu.Unlock()
}
// Make sure the certificate should be obtained based on config
err := cfg.checkIfCertShouldBeObtained(name)
if err != nil {
unblockWaiters()
return Certificate{}, err
}
log.Info("obtaining new certificate", zap.String("server_name", name))
// TODO: we are only adding a timeout because we don't know if the context passed in is actually cancelable...
@ -444,7 +507,7 @@ func (cfg *Config) obtainOnDemandCertificate(ctx context.Context, hello *tls.Cli
defer cancel()
// Obtain the certificate
err = cfg.ObtainCertAsync(ctx, name)
err := cfg.ObtainCertAsync(ctx, name)
// immediately unblock anyone waiting for it; doing this in
// a defer would risk deadlock because of the recursive call
@ -458,7 +521,7 @@ func (cfg *Config) obtainOnDemandCertificate(ctx context.Context, hello *tls.Cli
// success; certificate was just placed on disk, so
// we need only restart serving the certificate
return getCertWithoutReobtaining()
return cfg.loadCertFromStorage(ctx, log, hello)
}
// handshakeMaintenance performs a check on cert for expiration and OCSP validity.
@ -512,6 +575,16 @@ func (cfg *Config) handshakeMaintenance(ctx context.Context, hello *tls.ClientHe
// Check cert expiration
if currentlyInRenewalWindow(cert.Leaf.NotBefore, expiresAt(cert.Leaf), cfg.RenewalWindowRatio) {
// Check if the certificate still exists on disk. If not, we need to obtain a new one.
// This can happen if the certificate was cleaned up by the storage cleaner, but still
// remains in the in-memory cache.
if !cfg.storageHasCertResourcesAnyIssuer(ctx, cert.Names[0]) {
log.Debug("certificate not found on disk; obtaining new certificate",
zap.Strings("identifiers", cert.Names))
return cfg.obtainOnDemandCertificate(ctx, hello)
}
// Otherwise, renew the certificate.
return cfg.renewDynamicCertificate(ctx, hello, cert)
}
@ -536,11 +609,6 @@ func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.Clien
timeLeft := time.Until(expiresAt(currentCert.Leaf))
revoked := currentCert.ocsp != nil && currentCert.ocsp.Status == ocsp.Revoked
getCertWithoutReobtaining := func() (Certificate, error) {
// very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely
return cfg.getCertDuringHandshake(ctx, hello, true, false)
}
// see if another goroutine is already working on this certificate
obtainCertWaitChansMu.Lock()
wait, ok := obtainCertWaitChans[name]
@ -575,7 +643,7 @@ func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.Clien
timeout.Stop()
}
return getCertWithoutReobtaining()
return cfg.loadCertFromStorage(ctx, log, hello)
}
// looks like it's up to us to do all the work and renew the cert
@ -602,10 +670,8 @@ func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.Clien
renewAndReload := func(ctx context.Context, cancel context.CancelFunc) (Certificate, error) {
defer cancel()
log.Info("attempting certificate renewal")
// Make sure a certificate for this name should be obtained on-demand
err := cfg.checkIfCertShouldBeObtained(name)
// Make sure a certificate for this name should be renewed on-demand
err := cfg.checkIfCertShouldBeObtained(name, true)
if err != nil {
// if not, remove from cache (it will be deleted from storage later)
cfg.certCache.mu.Lock()
@ -620,6 +686,8 @@ func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.Clien
return Certificate{}, err
}
log.Info("attempting certificate renewal")
// otherwise, renew with issuer, etc.
var newCert Certificate
if revoked {
@ -650,7 +718,7 @@ func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.Clien
return newCert, err
}
return getCertWithoutReobtaining()
return cfg.loadCertFromStorage(ctx, log, hello)
}
// if the certificate hasn't expired, we can serve what we have and renew in the background
@ -668,20 +736,20 @@ func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.Clien
// getCertFromAnyCertManager gets a certificate from cfg's Managers. If there are no Managers defined, this is
// a no-op that returns empty values. Otherwise, it gets a certificate for hello from the first Manager that
// returns a certificate and no error.
func (cfg *Config) getCertFromAnyCertManager(ctx context.Context, hello *tls.ClientHelloInfo, log *zap.Logger) (Certificate, error) {
func (cfg *Config) getCertFromAnyCertManager(ctx context.Context, hello *tls.ClientHelloInfo, logger *zap.Logger) (Certificate, error) {
// fast path if nothing to do
if len(cfg.Managers) == 0 {
if cfg.OnDemand == nil || len(cfg.OnDemand.Managers) == 0 {
return Certificate{}, nil
}
var upstreamCert *tls.Certificate
// try all the GetCertificate methods on external managers; use first one that returns a certificate
for i, certManager := range cfg.Managers {
for i, certManager := range cfg.OnDemand.Managers {
var err error
upstreamCert, err = certManager.GetCertificate(ctx, hello)
if err != nil {
log.Error("getting certificate from external certificate manager",
logger.Error("getting certificate from external certificate manager",
zap.String("sni", hello.ServerName),
zap.Int("cert_manager", i),
zap.Error(err))
@ -692,7 +760,7 @@ func (cfg *Config) getCertFromAnyCertManager(ctx context.Context, hello *tls.Cli
}
}
if upstreamCert == nil {
log.Debug("all external certificate managers yielded no certificates and no errors", zap.String("sni", hello.ServerName))
logger.Debug("all external certificate managers yielded no certificates and no errors", zap.String("sni", hello.ServerName))
return Certificate{}, nil
}
@ -702,7 +770,7 @@ func (cfg *Config) getCertFromAnyCertManager(ctx context.Context, hello *tls.Cli
return Certificate{}, fmt.Errorf("external certificate manager: %s: filling cert from leaf: %v", hello.ServerName, err)
}
log.Debug("using externally-managed certificate",
logger.Debug("using externally-managed certificate",
zap.String("sni", hello.ServerName),
zap.Strings("names", cert.Names),
zap.Time("expiration", expiresAt(cert.Leaf)))
@ -792,5 +860,11 @@ func normalizedName(serverName string) string {
}
// obtainCertWaitChans is used to coordinate obtaining certs for each hostname.
var obtainCertWaitChans = make(map[string]chan struct{})
var obtainCertWaitChansMu sync.Mutex
// obtainCertWaitChans coordinates obtaining (or renewing) certs so that, per
// hostname, only one goroutine does the work while others wait on the channel.
var (
	obtainCertWaitChans   = make(map[string]chan struct{})
	obtainCertWaitChansMu sync.Mutex
)

// certLoadWaitChans coordinates loading certs from storage per hostname, so a
// swarm of simultaneous handshakes for the same name does not pound storage.
var (
	certLoadWaitChans   = make(map[string]chan struct{})
	certLoadWaitChansMu sync.Mutex
)

View File

@ -75,6 +75,8 @@ func (am *ACMEIssuer) distributedHTTPChallengeSolver(w http.ResponseWriter, r *h
if err != nil {
am.Logger.Error("looking up info for HTTP challenge",
zap.String("host", host),
zap.String("remote_addr", r.RemoteAddr),
zap.String("user_agent", r.Header.Get("User-Agent")),
zap.Error(err))
return false
}

View File

@ -33,7 +33,7 @@ func NewRateLimiter(maxEvents int, window time.Duration) *RingBufferRateLimiter
panic("maxEvents cannot be less than zero")
}
if maxEvents == 0 && window != 0 {
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
panic("NewRateLimiter: invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
}
rbrl := &RingBufferRateLimiter{
window: window,
@ -144,14 +144,15 @@ func (r *RingBufferRateLimiter) MaxEvents() int {
// the oldest events will be forgotten. If the new limit is
// higher, the window will suddenly have capacity for new
// reservations. It panics if maxEvents is 0 and window size
// is not zero.
// is not zero; if setting both the events limit and the
// window size to 0, call SetWindow() first.
func (r *RingBufferRateLimiter) SetMaxEvents(maxEvents int) {
newRing := make([]time.Time, maxEvents)
r.mu.Lock()
defer r.mu.Unlock()
if r.window != 0 && maxEvents == 0 {
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
panic("SetMaxEvents: invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
}
// only make the change if the new limit is different
@ -203,7 +204,7 @@ func (r *RingBufferRateLimiter) SetWindow(window time.Duration) {
r.mu.Lock()
defer r.mu.Unlock()
if window != 0 && len(r.ring) == 0 {
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
panic("SetWindow: invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
}
r.window = window
}

View File

@ -12,7 +12,7 @@ This project adheres to the [Contributor Covenant 1.2.](https://www.contributor-
## Reporting issues
Reporting issues are a great way to contribute to the project. We are perpetually grateful about a well-written, thorough bug report.
Reporting issues are a great way to contribute to the project. We are perpetually grateful about a well-written, thorough bug report.
Before raising a new issue, check our [issue list](https://github.com/casbin/casbin/issues) to determine if it already contains the problem that you are facing.

View File

@ -56,12 +56,12 @@ func (r *Role) removeRole(role *Role) {
role.removeUser(r)
}
//should only be called inside addRole
// should only be called inside addRole
func (r *Role) addUser(user *Role) {
r.users.Store(user.name, user)
}
//should only be called inside removeRole
// should only be called inside removeRole
func (r *Role) removeUser(user *Role) {
r.users.Delete(user.name)
}
@ -201,18 +201,14 @@ func (rm *RoleManagerImpl) rebuild() {
}
func (rm *RoleManagerImpl) Match(str string, pattern string) bool {
cacheKey := strings.Join([]string{str, pattern}, "$$")
if v, has := rm.matchingFuncCache.Get(cacheKey); has {
return v.(bool)
if str == pattern {
return true
}
if rm.matchingFunc != nil {
return rm.matchingFunc(str, pattern)
} else {
var matched bool
if rm.matchingFunc != nil {
matched = rm.matchingFunc(str, pattern)
} else {
matched = str == pattern
}
rm.matchingFuncCache.Put(cacheKey, matched)
return matched
return false
}
}
@ -493,7 +489,7 @@ func (dm *DomainManager) rebuild() {
})
}
//Clear clears all stored data and resets the role manager to the initial state.
// Clear clears all stored data and resets the role manager to the initial state.
func (dm *DomainManager) Clear() error {
dm.rmMap = &sync.Map{}
dm.matchingFuncCache = util.NewSyncLRUCache(100)
@ -512,18 +508,14 @@ func (dm *DomainManager) getDomain(domains ...string) (domain string, err error)
}
func (dm *DomainManager) Match(str string, pattern string) bool {
cacheKey := strings.Join([]string{str, pattern}, "$$")
if v, has := dm.matchingFuncCache.Get(cacheKey); has {
return v.(bool)
if str == pattern {
return true
}
if dm.domainMatchingFunc != nil {
return dm.domainMatchingFunc(str, pattern)
} else {
var matched bool
if dm.domainMatchingFunc != nil {
matched = dm.domainMatchingFunc(str, pattern)
} else {
matched = str == pattern
}
dm.matchingFuncCache.Put(cacheKey, matched)
return matched
return false
}
}

View File

@ -42,7 +42,7 @@ The parts that are implemented are based on what has been published in the SRT R
## Requirements
A Go version of 1.16+ is required.
A Go version of 1.18+ is required.
## Installation

View File

@ -218,17 +218,17 @@ func DefaultConfig() Config {
// UnmarshalURL takes a SRT URL and parses out the configuration. A SRT URL is
// srt://[host]:[port]?[key1]=[value1]&[key2]=[value2]...
func (c *Config) UnmarshalURL(addr string) (string, error) {
func (c *Config) UnmarshalURL(addr string) (string, string, error) {
u, err := url.Parse(addr)
if err != nil {
return "", err
return "", "", err
}
if u.Scheme != "srt" {
return "", fmt.Errorf("the URL doesn't seem to be an srt:// URL")
return "", "", fmt.Errorf("the URL doesn't seem to be an srt:// URL")
}
return u.Host, c.UnmarshalQuery(u.RawQuery)
return u.Hostname(), u.Port(), c.UnmarshalQuery(u.RawQuery)
}
// UnmarshalQuery parses a query string and interprets it as a configuration

View File

@ -633,69 +633,71 @@ func (c *srtConn) handlePacket(p packet.Packet) {
c.handleKMResponse(p)
}
}
return
}
if header.PacketSequenceNumber.Gt(c.debug.expectedRcvPacketSequenceNumber) {
c.log("connection:error", func() string {
return fmt.Sprintf("recv lost packets. got: %d, expected: %d (%d)\n", header.PacketSequenceNumber.Val(), c.debug.expectedRcvPacketSequenceNumber.Val(), c.debug.expectedRcvPacketSequenceNumber.Distance(header.PacketSequenceNumber))
})
}
c.debug.expectedRcvPacketSequenceNumber = header.PacketSequenceNumber.Inc()
//fmt.Printf("%s\n", p.String())
// Ignore FEC filter control packets
// https://github.com/Haivision/srt/blob/master/docs/features/packet-filtering-and-fec.md
// "An FEC control packet is distinguished from a regular data packet by having
// its message number equal to 0. This value isn't normally used in SRT (message
// numbers start from 1, increment to a maximum, and then roll back to 1)."
if header.MessageNumber == 0 {
c.log("connection:filter", func() string { return "dropped FEC filter control packet" })
return
}
// 4.5.1.1. TSBPD Time Base Calculation
if !c.tsbpdWrapPeriod {
if header.Timestamp > packet.MAX_TIMESTAMP-(30*1000000) {
c.tsbpdWrapPeriod = true
c.log("connection:tsbpd", func() string { return "TSBPD wrapping period started" })
}
} else {
if header.PacketSequenceNumber.Gt(c.debug.expectedRcvPacketSequenceNumber) {
c.log("connection:error", func() string {
return fmt.Sprintf("recv lost packets. got: %d, expected: %d (%d)\n", header.PacketSequenceNumber.Val(), c.debug.expectedRcvPacketSequenceNumber.Val(), c.debug.expectedRcvPacketSequenceNumber.Distance(header.PacketSequenceNumber))
})
if header.Timestamp >= (30*1000000) && header.Timestamp <= (60*1000000) {
c.tsbpdWrapPeriod = false
c.tsbpdTimeBaseOffset += uint64(packet.MAX_TIMESTAMP) + 1
c.log("connection:tsbpd", func() string { return "TSBPD wrapping period finished" })
}
}
c.debug.expectedRcvPacketSequenceNumber = header.PacketSequenceNumber.Inc()
//fmt.Printf("%s\n", p.String())
// Ignore FEC filter control packets
// https://github.com/Haivision/srt/blob/master/docs/features/packet-filtering-and-fec.md
// "An FEC control packet is distinguished from a regular data packet by having
// its message number equal to 0. This value isn't normally used in SRT (message
// numbers start from 1, increment to a maximum, and then roll back to 1)."
if header.MessageNumber == 0 {
c.log("connection:filter", func() string { return "dropped FEC filter control packet" })
return
tsbpdTimeBaseOffset := c.tsbpdTimeBaseOffset
if c.tsbpdWrapPeriod {
if header.Timestamp < (30 * 1000000) {
tsbpdTimeBaseOffset += uint64(packet.MAX_TIMESTAMP) + 1
}
}
// 4.5.1.1. TSBPD Time Base Calculation
if !c.tsbpdWrapPeriod {
if header.Timestamp > packet.MAX_TIMESTAMP-(30*1000000) {
c.tsbpdWrapPeriod = true
c.log("connection:tsbpd", func() string { return "TSBPD wrapping period started" })
}
} else {
if header.Timestamp >= (30*1000000) && header.Timestamp <= (60*1000000) {
c.tsbpdWrapPeriod = false
c.tsbpdTimeBaseOffset += uint64(packet.MAX_TIMESTAMP) + 1
c.log("connection:tsbpd", func() string { return "TSBPD wrapping period finished" })
}
}
header.PktTsbpdTime = c.tsbpdTimeBase + tsbpdTimeBaseOffset + uint64(header.Timestamp) + c.tsbpdDelay + c.tsbpdDrift
tsbpdTimeBaseOffset := c.tsbpdTimeBaseOffset
if c.tsbpdWrapPeriod {
if header.Timestamp < (30 * 1000000) {
tsbpdTimeBaseOffset += uint64(packet.MAX_TIMESTAMP) + 1
}
}
c.log("data:recv:dump", func() string { return p.Dump() })
header.PktTsbpdTime = c.tsbpdTimeBase + tsbpdTimeBaseOffset + uint64(header.Timestamp) + c.tsbpdDelay + c.tsbpdDrift
c.log("data:recv:dump", func() string { return p.Dump() })
c.cryptoLock.Lock()
if c.crypto != nil {
if header.KeyBaseEncryptionFlag != 0 {
if err := c.crypto.EncryptOrDecryptPayload(p.Data(), header.KeyBaseEncryptionFlag, header.PacketSequenceNumber.Val()); err != nil {
c.statistics.pktRecvUndecrypt++
c.statistics.byteRecvUndecrypt += p.Len()
}
} else {
c.cryptoLock.Lock()
if c.crypto != nil {
if header.KeyBaseEncryptionFlag != 0 {
if err := c.crypto.EncryptOrDecryptPayload(p.Data(), header.KeyBaseEncryptionFlag, header.PacketSequenceNumber.Val()); err != nil {
c.statistics.pktRecvUndecrypt++
c.statistics.byteRecvUndecrypt += p.Len()
}
} else {
c.statistics.pktRecvUndecrypt++
c.statistics.byteRecvUndecrypt += p.Len()
}
c.cryptoLock.Unlock()
// Put the packet into receive congestion control
c.recv.Push(p)
}
c.cryptoLock.Unlock()
// Put the packet into receive congestion control
c.recv.Push(p)
}
// handleKeepAlive resets the idle timeout and sends a keepalive to the peer.

View File

@ -115,7 +115,7 @@ func (s *liveSend) Push(p packet.Packet) {
return
}
// give to the packet a sequence number
// Give to the packet a sequence number
p.Header().PacketSequenceNumber = s.nextSequenceNumber
p.Header().PacketPositionFlag = packet.SinglePacket
p.Header().OrderFlag = false
@ -128,7 +128,7 @@ func (s *liveSend) Push(p packet.Packet) {
s.statistics.PktBuf++
s.statistics.ByteBuf += pktLen
// input bandwidth calculation
// Input bandwidth calculation
s.rate.bytes += pktLen
p.Header().Timestamp = uint32(p.Header().PktTsbpdTime & uint64(packet.MAX_TIMESTAMP))
@ -152,7 +152,7 @@ func (s *liveSend) Push(p packet.Packet) {
}
func (s *liveSend) Tick(now uint64) {
// deliver packets whose PktTsbpdTime is ripe
// Deliver packets whose PktTsbpdTime is ripe
s.lock.Lock()
removeList := make([]*list.Element, 0, s.packetList.Len())
for e := s.packetList.Front(); e != nil; e = e.Next() {
@ -192,7 +192,7 @@ func (s *liveSend) Tick(now uint64) {
p := e.Value.(packet.Packet)
if p.Header().PktTsbpdTime+s.dropThreshold <= now {
// dropped packet because too old
// Dropped packet because too old
s.statistics.PktDrop++
s.statistics.PktLoss++
s.statistics.ByteDrop += p.Len()
@ -245,7 +245,7 @@ func (s *liveSend) ACK(sequenceNumber circular.Number) {
for e := s.lossList.Front(); e != nil; e = e.Next() {
p := e.Value.(packet.Packet)
if p.Header().PacketSequenceNumber.Lt(sequenceNumber) {
// remove packet from buffer because it has been successfully transmitted
// Remove packet from buffer because it has been successfully transmitted
removeList = append(removeList, e)
} else {
break
@ -470,7 +470,7 @@ func (r *liveReceive) Push(pkt packet.Packet) {
r.avgPayloadSize = 0.875*r.avgPayloadSize + 0.125*float64(pktLen)
if pkt.Header().PacketSequenceNumber.Lte(r.lastDeliveredSequenceNumber) {
// too old, because up until r.lastDeliveredSequenceNumber, we already delivered
// Too old, because up until r.lastDeliveredSequenceNumber, we already delivered
r.statistics.PktBelated++
r.statistics.ByteBelated += pktLen
@ -481,7 +481,7 @@ func (r *liveReceive) Push(pkt packet.Packet) {
}
if pkt.Header().PacketSequenceNumber.Lt(r.lastACKSequenceNumber) {
// already acknowledged, ignoring
// Already acknowledged, ignoring
r.statistics.PktDrop++
r.statistics.ByteDrop += pktLen
@ -489,21 +489,21 @@ func (r *liveReceive) Push(pkt packet.Packet) {
}
if pkt.Header().PacketSequenceNumber.Equals(r.maxSeenSequenceNumber.Inc()) {
// in order, the packet we expected
// In order, the packet we expected
r.maxSeenSequenceNumber = pkt.Header().PacketSequenceNumber
} else if pkt.Header().PacketSequenceNumber.Lte(r.maxSeenSequenceNumber) {
// out of order, is it a missing piece? put it in the correct position
// Out of order, is it a missing piece? put it in the correct position
for e := r.packetList.Front(); e != nil; e = e.Next() {
p := e.Value.(packet.Packet)
if p.Header().PacketSequenceNumber == pkt.Header().PacketSequenceNumber {
// already received (has been sent more than once), ignoring
// Already received (has been sent more than once), ignoring
r.statistics.PktDrop++
r.statistics.ByteDrop += pktLen
break
} else if p.Header().PacketSequenceNumber.Gt(pkt.Header().PacketSequenceNumber) {
// late arrival, this fills a gap
// Late arrival, this fills a gap
r.statistics.PktBuf++
r.statistics.PktUnique++
@ -518,7 +518,7 @@ func (r *liveReceive) Push(pkt packet.Packet) {
return
} else {
// too far ahead, there are some missing sequence numbers, immediate NAK report
// Too far ahead, there are some missing sequence numbers, immediate NAK report
// Here we can prevent a possibly unnecessary NAK with SRTO_LOSSMAXTTL.
r.sendNAK(r.maxSeenSequenceNumber.Inc(), pkt.Header().PacketSequenceNumber.Dec())
@ -545,7 +545,7 @@ func (r *liveReceive) periodicACK(now uint64) (ok bool, sequenceNumber circular.
// 4.8.1. Packet Acknowledgement (ACKs, ACKACKs)
if now-r.lastPeriodicACK < r.periodicACKInterval {
if r.nPackets >= 64 {
lite = true // send light ACK
lite = true // Send light ACK
} else {
return
}
@ -555,8 +555,9 @@ func (r *liveReceive) periodicACK(now uint64) (ok bool, sequenceNumber circular.
ackSequenceNumber := r.lastDeliveredSequenceNumber
// find the sequence number up until we have all in a row.
// where the first gap is (or at the end of the list) is where we can ACK to.
// Find the sequence number up until we have all in a row.
// Where the first gap is (or at the end of the list) is where we can ACK to.
e := r.packetList.Front()
if e != nil {
p := e.Value.(packet.Packet)
@ -564,6 +565,25 @@ func (r *liveReceive) periodicACK(now uint64) (ok bool, sequenceNumber circular.
minPktTsbpdTime = p.Header().PktTsbpdTime
maxPktTsbpdTime = p.Header().PktTsbpdTime
// If there are packets that should be delivered by now, move forward.
if p.Header().PktTsbpdTime <= now {
for e = e.Next(); e != nil; e = e.Next() {
p = e.Value.(packet.Packet)
if p.Header().PktTsbpdTime > now {
break
}
}
ackSequenceNumber = p.Header().PacketSequenceNumber
maxPktTsbpdTime = p.Header().PktTsbpdTime
if e != nil {
e = e.Next()
p = e.Value.(packet.Packet)
}
}
if p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) {
ackSequenceNumber = p.Header().PacketSequenceNumber
@ -581,7 +601,7 @@ func (r *liveReceive) periodicACK(now uint64) (ok bool, sequenceNumber circular.
ok = true
sequenceNumber = ackSequenceNumber.Inc()
// keep track of the last ACK's sequence. with this we can faster ignore
// Keep track of the last ACK's sequence. with this we can faster ignore
// packets that come in that have a lower sequence number.
r.lastACKSequenceNumber = ackSequenceNumber
}
@ -602,12 +622,12 @@ func (r *liveReceive) periodicNAK(now uint64) (ok bool, from, to circular.Number
return
}
// send a periodic NAK
// Send a periodic NAK
ackSequenceNumber := r.lastDeliveredSequenceNumber
// send a NAK only for the first gap.
// alternatively send a NAK for max. X gaps because the size of the NAK packet is limited
// Send a NAK only for the first gap.
// Alternatively send a NAK for max. X gaps because the size of the NAK packet is limited.
for e := r.packetList.Front(); e != nil; e = e.Next() {
p := e.Value.(packet.Packet)
@ -638,7 +658,7 @@ func (r *liveReceive) Tick(now uint64) {
r.sendNAK(from, to)
}
// deliver packets whose PktTsbpdTime is ripe
// Deliver packets whose PktTsbpdTime is ripe
r.lock.Lock()
removeList := make([]*list.Element, 0, r.packetList.Len())
for e := r.packetList.Front(); e != nil; e = e.Next() {
@ -819,12 +839,12 @@ func (r *fakeLiveReceive) Push(pkt packet.Packet) {
r.avgPayloadSize = 0.875*r.avgPayloadSize + 0.125*float64(pktLen)
if pkt.Header().PacketSequenceNumber.Lte(r.lastDeliveredSequenceNumber) {
// too old, because up until r.lastDeliveredSequenceNumber, we already delivered
// Too old, because up until r.lastDeliveredSequenceNumber, we already delivered
return
}
if pkt.Header().PacketSequenceNumber.Lt(r.lastACKSequenceNumber) {
// already acknowledged, ignoring
// Already acknowledged, ignoring
return
}
@ -842,7 +862,7 @@ func (r *fakeLiveReceive) periodicACK(now uint64) (ok bool, sequenceNumber circu
// 4.8.1. Packet Acknowledgement (ACKs, ACKACKs)
if now-r.lastPeriodicACK < r.periodicACKInterval {
if r.nPackets >= 64 {
lite = true // send light ACK
lite = true // Send light ACK
} else {
return
}
@ -864,7 +884,7 @@ func (r *fakeLiveReceive) Tick(now uint64) {
r.sendACK(sequenceNumber, lite)
}
// deliver packets whose PktTsbpdTime is ripe
// Deliver packets whose PktTsbpdTime is ripe
r.lock.Lock()
defer r.lock.Unlock()

View File

@ -341,12 +341,21 @@ type zeroable interface {
// IsZero returns true when the value passed into the function is a zero value.
// This allows for safer checking of interface values.
func IsZero(data interface{}) bool {
v := reflect.ValueOf(data)
// check for nil data
switch v.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
if v.IsNil() {
return true
}
}
// check for things that have an IsZero method instead
if vv, ok := data.(zeroable); ok {
return vv.IsZero()
}
// continue with slightly more complex reflection
v := reflect.ValueOf(data)
switch v.Kind() {
case reflect.String:
return v.Len() == 0
@ -358,14 +367,13 @@ func IsZero(data interface{}) bool {
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
case reflect.Struct, reflect.Array:
return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface())
case reflect.Invalid:
return true
default:
return false
}
return false
}
// AddInitialisms add additional initialisms

View File

@ -1,7 +1,7 @@
Package validator
=================
<img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v10/logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![Project status](https://img.shields.io/badge/version-10.14.0-green.svg)
![Project status](https://img.shields.io/badge/version-10.14.1-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)

View File

@ -1414,25 +1414,21 @@ func isURL(fl FieldLevel) bool {
switch field.Kind() {
case reflect.String:
var i int
s := field.String()
// checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
// emulate browser and strip the '#' suffix prior to validation. see issue-#237
if i = strings.Index(s, "#"); i > -1 {
s = s[:i]
}
if len(s) == 0 {
return false
}
url, err := url.ParseRequestURI(s)
url, err := url.Parse(s)
if err != nil || url.Scheme == "" {
return false
}
if url.Host == "" && url.Fragment == "" && url.Opaque == "" {
return false
}
return true
}
@ -1450,7 +1446,13 @@ func isHttpURL(fl FieldLevel) bool {
case reflect.String:
s := strings.ToLower(field.String())
return strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://")
url, err := url.Parse(s)
if err != nil || url.Host == "" {
return false
}
return url.Scheme == "http" || url.Scheme == "https"
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
@ -2568,9 +2570,17 @@ func isDirPath(fl FieldLevel) bool {
func isJSON(fl FieldLevel) bool {
field := fl.Field()
if field.Kind() == reflect.String {
switch field.Kind() {
case reflect.String:
val := field.String()
return json.Valid([]byte(val))
case reflect.Slice:
fieldType := field.Type()
if fieldType.ConvertibleTo(byteSliceType) {
b := field.Convert(byteSliceType).Interface().([]byte)
return json.Valid(b)
}
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))

View File

@ -53,6 +53,8 @@ var (
timeDurationType = reflect.TypeOf(time.Duration(0))
timeType = reflect.TypeOf(time.Time{})
byteSliceType = reflect.TypeOf([]byte{})
defaultCField = &cField{namesEqual: true}
)

View File

@ -181,6 +181,16 @@ func (c *TwoQueueCache[K, V]) Keys() []K {
return append(k1, k2...)
}
// Values returns a slice of the values in the cache.
// The frequently used values are first in the returned slice.
func (c *TwoQueueCache[K, V]) Values() []V {
	c.lock.RLock()
	defer c.lock.RUnlock()

	// Frequent entries lead, recent entries follow.
	return append(c.frequent.Values(), c.recent.Values()...)
}
// Remove removes the provided key from the cache.
func (c *TwoQueueCache[K, V]) Remove(key K) {
c.lock.Lock()

View File

@ -15,11 +15,20 @@ Example
Using the LRU is very simple:
```go
l, _ := New[int, interface{}](128)
for i := 0; i < 256; i++ {
l.Add(i, nil)
}
if l.Len() != 128 {
panic(fmt.Sprintf("bad len: %v", l.Len()))
package main
import (
"fmt"
"github.com/hashicorp/golang-lru/v2"
)
func main() {
l, _ := lru.New[int, any](128)
for i := 0; i < 256; i++ {
l.Add(i, nil)
}
if l.Len() != 128 {
panic(fmt.Sprintf("bad len: %v", l.Len()))
}
}
```

View File

@ -1,259 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
// ARC is an enhancement over the standard LRU cache in that it tracks both
// frequency and recency of use. This avoids a burst in access to new
// entries from evicting the frequently used older entries. It adds some
// additional tracking overhead to a standard LRU cache; computationally
// it is roughly 2x the cost, and the extra memory overhead is linear
// with the size of the cache. ARC has been patented by IBM, but is
// similar to the TwoQueueCache (2Q) which requires setting parameters.
type ARCCache[K comparable, V any] struct {
	size int // size is the total capacity of the cache
	p    int // p is the dynamic preference towards T1 (recency) or T2 (frequency)

	t1 simplelru.LRUCache[K, V]        // t1 is the LRU for recently accessed items
	b1 simplelru.LRUCache[K, struct{}] // b1 is the ghost LRU tracking keys evicted from t1

	t2 simplelru.LRUCache[K, V]        // t2 is the LRU for frequently accessed items
	b2 simplelru.LRUCache[K, struct{}] // b2 is the ghost LRU tracking keys evicted from t2

	lock sync.RWMutex // lock guards all of the fields above
}
// NewARC creates an ARC of the given size.
// It returns an error if any of the four internal LRUs cannot be built.
func NewARC[K comparable, V any](size int) (*ARCCache[K, V], error) {
	// Ghost lists record recently evicted keys (no values).
	ghostRecent, err := simplelru.NewLRU[K, struct{}](size, nil)
	if err != nil {
		return nil, err
	}
	ghostFrequent, err := simplelru.NewLRU[K, struct{}](size, nil)
	if err != nil {
		return nil, err
	}

	// Live lists hold the cached values.
	recent, err := simplelru.NewLRU[K, V](size, nil)
	if err != nil {
		return nil, err
	}
	frequent, err := simplelru.NewLRU[K, V](size, nil)
	if err != nil {
		return nil, err
	}

	// Assemble the cache; p starts at 0 (no learned preference yet).
	return &ARCCache[K, V]{
		size: size,
		p:    0,
		t1:   recent,
		b1:   ghostRecent,
		t2:   frequent,
		b2:   ghostFrequent,
	}, nil
}
// Get looks up a key's value from the cache.
// A hit in T1 (recent) promotes the entry to T2 (frequent).
func (c *ARCCache[K, V]) Get(key K) (value V, ok bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Recent hit: move the entry from T1 to T2.
	if v, found := c.t1.Peek(key); found {
		c.t1.Remove(key)
		c.t2.Add(key, v)
		return v, found
	}

	// Frequent hit: Get refreshes recency within T2.
	if v, found := c.t2.Get(key); found {
		return v, found
	}

	// Miss: zero value, ok == false.
	return
}
// Add adds a value to the cache.
// Hits in the ghost lists (b1/b2) adapt the preference p between
// recency (T1) and frequency (T2) before the value is (re)inserted.
func (c *ARCCache[K, V]) Add(key K, value V) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Already in T1 (recent): promote to T2 (frequent).
	if c.t1.Contains(key) {
		c.t1.Remove(key)
		c.t2.Add(key, value)
		return
	}

	// Already in T2 (frequent): just refresh the value.
	if c.t2.Contains(key) {
		c.t2.Add(key, value)
		return
	}

	// Ghost hit in B1: the key was recently evicted from T1,
	// so T1 is too small — grow p towards recency.
	if c.b1.Contains(key) {
		step := 1
		nB1 := c.b1.Len()
		nB2 := c.b2.Len()
		if nB2 > nB1 {
			step = nB2 / nB1
		}
		if c.p+step >= c.size {
			c.p = c.size
		} else {
			c.p += step
		}

		// Evict if the live lists are full.
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(false)
		}

		// The key leaves the ghost list and re-enters as frequent.
		c.b1.Remove(key)
		c.t2.Add(key, value)
		return
	}

	// Ghost hit in B2: the key was recently evicted from T2,
	// so T2 is too small — shrink p towards frequency.
	if c.b2.Contains(key) {
		step := 1
		nB1 := c.b1.Len()
		nB2 := c.b2.Len()
		if nB1 > nB2 {
			step = nB1 / nB2
		}
		if step >= c.p {
			c.p = 0
		} else {
			c.p -= step
		}

		// Evict if the live lists are full.
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(true)
		}

		// The key leaves the ghost list and re-enters as frequent.
		c.b2.Remove(key)
		c.t2.Add(key, value)
		return
	}

	// Brand-new key. Evict if the live lists are full.
	if c.t1.Len()+c.t2.Len() >= c.size {
		c.replace(false)
	}

	// Keep the ghost lists trimmed to their target sizes.
	if c.b1.Len() > c.size-c.p {
		c.b1.RemoveOldest()
	}
	if c.b2.Len() > c.p {
		c.b2.RemoveOldest()
	}

	// Insert as recently seen.
	c.t1.Add(key, value)
}
// replace adaptively evicts from either T1 or T2, guided by the
// learned target size p; the evicted key is remembered in the
// corresponding ghost list (B1 or B2).
func (c *ARCCache[K, V]) replace(b2ContainsKey bool) {
	n := c.t1.Len()
	if n > 0 && (n > c.p || (n == c.p && b2ContainsKey)) {
		// T1 exceeds its target share: evict its oldest into B1.
		if k, _, evicted := c.t1.RemoveOldest(); evicted {
			c.b1.Add(k, struct{}{})
		}
		return
	}

	// Otherwise evict the oldest of T2 into B2.
	if k, _, evicted := c.t2.RemoveOldest(); evicted {
		c.b2.Add(k, struct{}{})
	}
}
// Len returns the number of cached entries (across both live lists).
func (c *ARCCache[K, V]) Len() int {
	c.lock.RLock()
	defer c.lock.RUnlock()

	n := c.t1.Len()
	n += c.t2.Len()
	return n
}
// Keys returns all the cached keys, recent (T1) first, then frequent (T2).
func (c *ARCCache[K, V]) Keys() []K {
	c.lock.RLock()
	defer c.lock.RUnlock()

	return append(c.t1.Keys(), c.t2.Keys()...)
}
// Remove is used to purge a key from the cache.
// At most one of the four internal lists can hold the key, so the
// first successful removal short-circuits the rest.
func (c *ARCCache[K, V]) Remove(key K) {
	c.lock.Lock()
	defer c.lock.Unlock()

	switch {
	case c.t1.Remove(key):
	case c.t2.Remove(key):
	case c.b1.Remove(key):
	case c.b2.Remove(key):
	}
}
// Purge is used to clear the cache, emptying all four internal lists.
func (c *ARCCache[K, V]) Purge() {
	c.lock.Lock()
	defer c.lock.Unlock()

	for _, ghost := range []simplelru.LRUCache[K, struct{}]{c.b1, c.b2} {
		ghost.Purge()
	}
	c.t1.Purge()
	c.t2.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *ARCCache[K, V]) Contains(key K) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()

	if c.t1.Contains(key) {
		return true
	}
	return c.t2.Contains(key)
}
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *ARCCache[K, V]) Peek(key K) (value V, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	// Try the recent list first, then fall back to the frequent list.
	if v, found := c.t1.Peek(key); found {
		return v, found
	}
	return c.t2.Peek(key)
}

View File

@ -3,21 +3,21 @@
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
// Cache is a simple LRU cache. It is based on the LRU implementation in
// groupcache: https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
// This avoids a burst of accesses from taking out frequently used entries, at
// the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
// ARCCache is an adaptive replacement cache. It tracks recent evictions as well
// as recent usage in both the frequent and recent caches. Its computational
// overhead is comparable to TwoQueueCache, but the memory overhead is linear
// with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
// your program. For this reason, it is in a separate go module contained within
// this repository.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.

View File

@ -233,6 +233,14 @@ func (c *Cache[K, V]) Keys() []K {
return keys
}
// Values returns a slice of the values in the cache, from oldest to newest.
func (c *Cache[K, V]) Values() []V {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.Values()
}
// Len returns the number of items in the cache.
func (c *Cache[K, V]) Len() int {
c.lock.RLock()

View File

@ -129,6 +129,17 @@ func (c *LRU[K, V]) Keys() []K {
return keys
}
// Values returns a slice of the values in the cache, from oldest to newest.
func (c *LRU[K, V]) Values() []V {
	// Walk the eviction list from its back (oldest) to its front (newest).
	values := make([]V, 0, len(c.items))
	for ent := c.evictList.back(); ent != nil; ent = ent.prevEntry() {
		values = append(values, ent.value)
	}
	return values
}
// Len returns the number of items in the cache.
func (c *LRU[K, V]) Len() int {
return c.evictList.length()

View File

@ -32,6 +32,9 @@ type LRUCache[K comparable, V any] interface {
// Returns a slice of the keys in the cache, from oldest to newest.
Keys() []K
// Values returns a slice of the values in the cache, from oldest to newest.
Values() []V
// Returns the number of items in the cache.
Len() int

View File

@ -1,19 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package lru
import (
"crypto/rand"
"math"
"math/big"
"testing"
)
// getRand returns a cryptographically random non-negative int64 in
// [0, math.MaxInt64), failing the test immediately on any RNG error.
func getRand(tb testing.TB) int64 {
	v, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		tb.Fatal(err)
	}
	return v.Int64()
}

View File

@ -742,7 +742,6 @@ searchDict:
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>8, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s - 1)
cv = load64(src, s)

View File

@ -157,7 +157,6 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
index0 := base + 1
index1 := s - 2
cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
@ -269,18 +268,21 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
lTable[hash7(cv0, lTableBits)] = uint32(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
// lTable could be postponed, but very minor difference.
lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
index0 += 1
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
index1 -= 2
index2 += 2
}
}
@ -459,12 +461,14 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
index1 -= 2
index2 += 2
}
}
@ -599,7 +603,6 @@ searchDict:
if s >= sLimit {
break searchDict
}
cv = load64(src, s)
// Index in-between
index0 := base + 1
index1 := s - 2
@ -865,12 +868,14 @@ searchDict:
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
index1 -= 2
index2 += 2
}
}
@ -961,7 +966,6 @@ searchDict:
index0 := base + 1
index1 := s - 2
cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
@ -1079,12 +1083,14 @@ searchDict:
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
index1 -= 2
index2 += 2
}
}

File diff suppressed because it is too large Load Diff

View File

@ -771,7 +771,7 @@ func (w *Writer) closeIndex(idx bool) ([]byte, error) {
}
var index []byte
if w.err(nil) == nil && w.writer != nil {
if w.err(err) == nil && w.writer != nil {
// Create index.
if idx {
compSize := int64(-1)

View File

@ -435,6 +435,7 @@ Exit Code 1
| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. |
| SYSEE | SYSENTER and SYSEXIT instructions |
| TBM | AMD Trailing Bit Manipulation |
| TDX_GUEST | Intel Trust Domain Extensions Guest |
| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations |
| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. |
| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. |

View File

@ -226,6 +226,7 @@ const (
SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
SYSEE // SYSENTER and SYSEXIT instructions
TBM // AMD Trailing Bit Manipulation
TDX_GUEST // Intel Trust Domain Extensions Guest
TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations
TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
@ -1186,13 +1187,8 @@ func support() flagSet {
fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP)
fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD)
// CPUID.(EAX=7, ECX=1).EDX
fs.setIf(edx&(1<<4) != 0, AVXVNNIINT8)
fs.setIf(edx&(1<<5) != 0, AVXNECONVERT)
fs.setIf(edx&(1<<14) != 0, PREFETCHI)
// CPUID.(EAX=7, ECX=1).EAX
eax1, _, _, _ := cpuidex(7, 1)
eax1, _, _, edx1 := cpuidex(7, 1)
fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
@ -1202,6 +1198,11 @@ func support() flagSet {
fs.setIf(eax1&(1<<23) != 0, AVXIFMA)
fs.setIf(eax1&(1<<26) != 0, LAM)
// CPUID.(EAX=7, ECX=1).EDX
fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
// Only detect AVX-512 features if XGETBV is supported
if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
// Check for OS support
@ -1393,6 +1394,13 @@ func support() flagSet {
fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
}
if mfi >= 0x21 {
// Intel Trust Domain Extensions (TDX) guests have their own CPUID leaf (0x21).
_, ebx, ecx, edx := cpuid(0x21)
identity := string(valAsString(ebx, edx, ecx))
fs.setIf(identity == "IntelTDX ", TDX_GUEST)
}
return fs
}

View File

@ -166,59 +166,60 @@ func _() {
_ = x[SYSCALL-156]
_ = x[SYSEE-157]
_ = x[TBM-158]
_ = x[TLB_FLUSH_NESTED-159]
_ = x[TME-160]
_ = x[TOPEXT-161]
_ = x[TSCRATEMSR-162]
_ = x[TSXLDTRK-163]
_ = x[VAES-164]
_ = x[VMCBCLEAN-165]
_ = x[VMPL-166]
_ = x[VMSA_REGPROT-167]
_ = x[VMX-168]
_ = x[VPCLMULQDQ-169]
_ = x[VTE-170]
_ = x[WAITPKG-171]
_ = x[WBNOINVD-172]
_ = x[WRMSRNS-173]
_ = x[X87-174]
_ = x[XGETBV1-175]
_ = x[XOP-176]
_ = x[XSAVE-177]
_ = x[XSAVEC-178]
_ = x[XSAVEOPT-179]
_ = x[XSAVES-180]
_ = x[AESARM-181]
_ = x[ARMCPUID-182]
_ = x[ASIMD-183]
_ = x[ASIMDDP-184]
_ = x[ASIMDHP-185]
_ = x[ASIMDRDM-186]
_ = x[ATOMICS-187]
_ = x[CRC32-188]
_ = x[DCPOP-189]
_ = x[EVTSTRM-190]
_ = x[FCMA-191]
_ = x[FP-192]
_ = x[FPHP-193]
_ = x[GPA-194]
_ = x[JSCVT-195]
_ = x[LRCPC-196]
_ = x[PMULL-197]
_ = x[SHA1-198]
_ = x[SHA2-199]
_ = x[SHA3-200]
_ = x[SHA512-201]
_ = x[SM3-202]
_ = x[SM4-203]
_ = x[SVE-204]
_ = x[lastID-205]
_ = x[TDX_GUEST-159]
_ = x[TLB_FLUSH_NESTED-160]
_ = x[TME-161]
_ = x[TOPEXT-162]
_ = x[TSCRATEMSR-163]
_ = x[TSXLDTRK-164]
_ = x[VAES-165]
_ = x[VMCBCLEAN-166]
_ = x[VMPL-167]
_ = x[VMSA_REGPROT-168]
_ = x[VMX-169]
_ = x[VPCLMULQDQ-170]
_ = x[VTE-171]
_ = x[WAITPKG-172]
_ = x[WBNOINVD-173]
_ = x[WRMSRNS-174]
_ = x[X87-175]
_ = x[XGETBV1-176]
_ = x[XOP-177]
_ = x[XSAVE-178]
_ = x[XSAVEC-179]
_ = x[XSAVEOPT-180]
_ = x[XSAVES-181]
_ = x[AESARM-182]
_ = x[ARMCPUID-183]
_ = x[ASIMD-184]
_ = x[ASIMDDP-185]
_ = x[ASIMDHP-186]
_ = x[ASIMDRDM-187]
_ = x[ATOMICS-188]
_ = x[CRC32-189]
_ = x[DCPOP-190]
_ = x[EVTSTRM-191]
_ = x[FCMA-192]
_ = x[FP-193]
_ = x[FPHP-194]
_ = x[GPA-195]
_ = x[JSCVT-196]
_ = x[LRCPC-197]
_ = x[PMULL-198]
_ = x[SHA1-199]
_ = x[SHA2-200]
_ = x[SHA3-201]
_ = x[SHA512-202]
_ = x[SM3-203]
_ = x[SM4-204]
_ = x[SVE-205]
_ = x[lastID-206]
_ = x[firstID-0]
}
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1198, 1201, 1207, 1217, 1225, 1229, 1238, 1242, 1254, 1257, 1267, 1270, 1277, 1285, 1292, 1295, 1302, 1305, 1310, 1316, 1324, 1330, 1336, 1344, 1349, 1356, 1363, 1371, 1378, 1383, 1388, 1395, 1399, 1401, 1405, 1408, 1413, 1418, 1423, 1427, 1431, 1435, 1441, 1444, 1447, 1450, 1456}
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1191, 1207, 1210, 1216, 1226, 1234, 1238, 1247, 1251, 1263, 1266, 1276, 1279, 1286, 1294, 1301, 1304, 1311, 1314, 1319, 1325, 1333, 1339, 1345, 1353, 1358, 1365, 1372, 1380, 1387, 1392, 1397, 1404, 1408, 1410, 1414, 1417, 1422, 1427, 1432, 1436, 1440, 1444, 1450, 1453, 1456, 1459, 1465}
func (i FeatureID) String() string {
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {

View File

@ -62,7 +62,7 @@ type PutObjectFanOutResponse struct {
ETag string `json:"etag,omitempty"`
VersionID string `json:"versionId,omitempty"`
LastModified *time.Time `json:"lastModified,omitempty"`
Error error `json:"error,omitempty"`
Error string `json:"error,omitempty"`
}
// PutObjectFanOut - is a variant of PutObject instead of writing a single object from a single

View File

@ -389,8 +389,9 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
headers := opts.Header()
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload
}
// Instantiate all the complete multipart buffer.

View File

@ -124,7 +124,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.55"
libraryVersion = "v7.0.57"
)
// User Agent should always following the below style.
@ -919,7 +919,7 @@ func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, is
if h, p, err := net.SplitHostPort(host); err == nil {
if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
host = h
if ip := net.ParseIP(h); ip != nil && ip.To16() != nil {
if ip := net.ParseIP(h); ip != nil && ip.To4() == nil {
host = "[" + h + "]"
}
}

View File

@ -2539,13 +2539,13 @@ func testTrailingChecksums() {
test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher)
// Set correct CRC.
c.TraceOn(os.Stdout)
// c.TraceOn(os.Stderr)
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO)
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
c.TraceOff()
// c.TraceOff()
cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
@ -2655,8 +2655,8 @@ func testPutObjectWithAutomaticChecksums() {
}
// Enable tracing, write to stderr.
c.TraceOn(os.Stderr)
defer c.TraceOff()
// c.TraceOn(os.Stderr)
// defer c.TraceOff()
for i, test := range tests {
bufSize := dataFileMap["datafile-10-kB"]
@ -4821,6 +4821,11 @@ func testPresignedPostPolicy() {
policy.SetContentType("binary/octet-stream")
policy.SetContentLengthRange(10, 1024*1024)
policy.SetUserMetadata(metadataKey, metadataValue)
// Add CRC32C
checksum := minio.ChecksumCRC32C.ChecksumBytes(buf)
policy.SetChecksum(checksum)
args["policy"] = policy.String()
presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
@ -4888,6 +4893,7 @@ func testPresignedPostPolicy() {
Timeout: 30 * time.Second,
Transport: transport,
}
args["url"] = presignedPostPolicyURL.String()
req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes()))
if err != nil {
@ -4920,13 +4926,21 @@ func testPresignedPostPolicy() {
expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
if val, ok := res.Header["Location"]; ok {
if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
logError(testName, function, args, startTime, "", "Location in header response is incorrect", err)
if !strings.Contains(expectedLocation, "s3.amazonaws.com/") {
// Test when not against AWS S3.
if val, ok := res.Header["Location"]; ok {
if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err)
return
}
} else {
logError(testName, function, args, startTime, "", "Location not found in header response", err)
return
}
} else {
logError(testName, function, args, startTime, "", "Location not found in header response", err)
}
want := checksum.Encoded()
if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want {
logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil)
return
}

View File

@ -18,12 +18,12 @@ import (
"sort"
"strings"
"github.com/prometheus/client_golang/prometheus/internal"
"github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially

View File

@ -401,7 +401,7 @@ type HistogramOpts struct {
// Histogram by a Prometheus server with that feature enabled (requires
// Prometheus v2.40+). Sparse buckets are exponential buckets covering
// the whole float64 range (with the exception of the “zero” bucket, see
// SparseBucketsZeroThreshold below). From any one bucket to the next,
// NativeHistogramZeroThreshold below). From any one bucket to the next,
// the width of the bucket grows by a constant
// factor. NativeHistogramBucketFactor provides an upper bound for this
// factor (exception see below). The smaller
@ -432,7 +432,7 @@ type HistogramOpts struct {
// bucket. For best results, this should be close to a bucket
// boundary. This is usually the case if picking a power of two. If
// NativeHistogramZeroThreshold is left at zero,
// DefSparseBucketsZeroThreshold is used as the threshold. To configure
// DefNativeHistogramZeroThreshold is used as the threshold. To configure
// a zero bucket with an actual threshold of zero (i.e. only
// observations of precisely zero will go into the zero bucket), set
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
@ -639,8 +639,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
if frac == 0.5 {
key--
}
div := 1 << -schema
key = (key + div - 1) / div
offset := (1 << -schema) - 1
key = (key + offset) >> -schema
}
if isInf {
key++
@ -817,7 +817,7 @@ func (h *histogram) observe(v float64, bucket int) {
}
}
// limitSparsebuckets applies a strategy to limit the number of populated sparse
// limitBuckets applies a strategy to limit the number of populated sparse
// buckets. It's generally best effort, and there are situations where the
// number can go higher (if even the lowest resolution isn't enough to reduce
// the number sufficiently, or if the provided counts aren't fully updated yet

View File

@ -37,6 +37,7 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
@ -47,9 +48,10 @@ import (
)
const (
contentTypeHeader = "Content-Type"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
contentTypeHeader = "Content-Type"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
processStartTimeHeader = "Process-Start-Time-Unix"
)
var gzipPool = sync.Pool{
@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
if !opts.ProcessStartTime.IsZero() {
rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
}
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
@ -366,6 +371,14 @@ type HandlerOpts struct {
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
// ProcessStartTime allows setting process start timevalue that will be exposed
// with "Process-Start-Time-Unix" response header along with the metrics
// payload. This allow callers to have efficient transformations to cumulative
// counters (e.g. OpenTelemetry) or generally _created timestamp estimation per
// scrape target.
// NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
// exposition format.
ProcessStartTime time.Time
}
// gzipAccepted returns whether the client will accept gzip-encoded content.

View File

@ -20,6 +20,24 @@ import (
"github.com/prometheus/common/model"
)
var labelsPool = &sync.Pool{
New: func() interface{} {
return make(Labels)
},
}
func getLabelsFromPool() Labels {
return labelsPool.Get().(Labels)
}
func putLabelsToPool(labels Labels) {
for k := range labels {
delete(labels, k)
}
labelsPool.Put(labels)
}
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
@ -93,6 +111,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
h, err := m.hashLabels(labels)
if err != nil {
return false
@ -109,6 +129,8 @@ func (m *MetricVec) Delete(labels Labels) bool {
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
return m.metricMap.deleteByLabels(labels, m.curry)
}
@ -229,6 +251,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@ -647,15 +671,16 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
}
func constrainLabels(desc *Desc, labels Labels) Labels {
constrainedValues := make(Labels, len(labels))
constrainedLabels := getLabelsFromPool()
for l, v := range labels {
if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
constrainedValues[l] = desc.variableLabels[i].Constrain(v)
continue
v = desc.variableLabels[i].Constrain(v)
}
constrainedValues[l] = v
constrainedLabels[l] = v
}
return constrainedValues
return constrainedLabels
}
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {

View File

@ -11,6 +11,9 @@ func Sleep(ctx context.Context, interval time.Duration) error {
timer := time.NewTimer(interval)
select {
case <-ctx.Done():
if !timer.Stop() {
<-timer.C
}
return ctx.Err()
case <-timer.C:
return nil

View File

@ -154,13 +154,13 @@ func fillFromMeminfoWithContext() (*VirtualMemoryStat, *VirtualMemoryExStat, err
return ret, retEx, err
}
retEx.Unevictable = t * 1024
case "WriteBack":
case "Writeback":
t, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return ret, retEx, err
}
ret.WriteBack = t * 1024
case "WriteBackTmp":
case "WritebackTmp":
t, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return ret, retEx, err

View File

@ -278,7 +278,7 @@ func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackSta
return nil, common.ErrNotImplementedError
}
// NetProtoCounters returns network statistics for the entire system
// ProtoCounters returns network statistics for the entire system
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Not Implemented for Darwin

View File

@ -115,7 +115,7 @@ func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackSta
return nil, common.ErrNotImplementedError
}
// NetProtoCounters returns network statistics for the entire system
// ProtoCounters returns network statistics for the entire system
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Not Implemented for FreeBSD

View File

@ -157,7 +157,7 @@ var netProtocols = []string{
"udplite",
}
// NetProtoCounters returns network statistics for the entire system
// ProtoCounters returns network statistics for the entire system
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Available protocols:

Some files were not shown because too many files have changed in this diff Show More