Move content encoding to the beginning of the middleware chain, update dependencies

This commit is contained in:
Ingo Oppermann 2024-10-09 14:25:42 +02:00
parent 4d6eb122b0
commit f97943b275
No known key found for this signature in database
GPG Key ID: 2AB32426E9DD229E
348 changed files with 18733 additions and 5367 deletions

View File

@ -1427,7 +1427,6 @@ func (a *api) start(ctx context.Context) error {
Password: "",
DefaultFile: "index.html",
DefaultContentType: "text/html",
Gzip: true,
Filesystem: a.diskfs,
Cache: a.cache,
},
@ -1440,7 +1439,6 @@ func (a *api) start(ctx context.Context) error {
Password: cfg.Storage.Memory.Auth.Password,
DefaultFile: "",
DefaultContentType: "application/data",
Gzip: true,
Filesystem: a.memfs,
Cache: nil,
},
@ -1456,7 +1454,6 @@ func (a *api) start(ctx context.Context) error {
Password: s3.Auth.Password,
DefaultFile: "",
DefaultContentType: "application/data",
Gzip: true,
Filesystem: a.s3fs[s3.Name],
Cache: a.cache,
})
@ -1469,7 +1466,7 @@ func (a *api) start(ctx context.Context) error {
Restream: a.restream,
Metrics: a.metrics,
Prometheus: a.prom,
MimeTypesFile: cfg.Storage.MimeTypes,
MimeTypesFile: cfg.Storage.MimeTypesFile,
Filesystems: httpfilesystems,
IPLimiter: iplimiter,
Profiling: cfg.Debug.Profiling,
@ -1501,6 +1498,11 @@ func (a *api) start(ctx context.Context) error {
return false
},
Resources: a.resources,
Compress: http.CompressConfig{
Encoding: cfg.Compress.Encoding,
MimeTypes: cfg.Compress.MimeTypes,
MinLength: cfg.Compress.MinLength,
},
}
mainserverhandler, err := http.NewServer(serverConfig)

View File

@ -94,6 +94,7 @@ func (d *Config) Clone() *Config {
data.Log = d.Log
data.DB = d.DB
data.Host = d.Host
data.Compress = d.Compress
data.API = d.API
data.TLS = d.TLS
data.Storage = d.Storage
@ -113,6 +114,9 @@ func (d *Config) Clone() *Config {
data.Host.Name = slices.Copy(d.Host.Name)
data.Compress.Encoding = slices.Copy(d.Compress.Encoding)
data.Compress.MimeTypes = slices.Copy(d.Compress.MimeTypes)
data.API.Access.HTTP.Allow = slices.Copy(d.API.Access.HTTP.Allow)
data.API.Access.HTTP.Block = slices.Copy(d.API.Access.HTTP.Block)
data.API.Access.HTTPS.Allow = slices.Copy(d.API.Access.HTTPS.Allow)
@ -164,6 +168,21 @@ func (d *Config) init() {
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)
d.vars.Register(value.NewStringList(&d.Compress.Encoding, []string{"gzip"}, ","), "compress.encoding", "CORE_COMPRESS_ENCODING", nil, "Comma separated list of content encodings", false, false)
d.vars.Register(value.NewStringList(&d.Compress.MimeTypes, []string{
"text/plain",
"text/html",
"text/css",
"text/javascript",
"application/json",
"application/x-mpegurl",
"application/vnd.apple.mpegurl",
"image/svg+xml",
"text/event-stream",
"application/x-json-stream",
}, ","), "compress.mimetypes", "CORE_COMPRESS_MIMETYPES", nil, "Comma separated list of mimetypes to compress", false, false)
d.vars.Register(value.NewInt(&d.Compress.MinLength, 1000), "compress.min_length", "CORE_COMPRESS_MIN_LENGTH", nil, "Minimum size before compression will be used", false, false)
// API
d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false)
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
@ -193,7 +212,7 @@ func (d *Config) init() {
d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEY_FILE", []string{"CORE_TLS_KEYFILE"}, "Path to key file in PEM format", false, false)
// Storage
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
d.vars.Register(value.NewFile(&d.Storage.MimeTypesFile, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
// Storage (Disk)
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)

View File

@ -32,6 +32,11 @@ type Data struct {
Name []string `json:"name"`
Auto bool `json:"auto"`
} `json:"host"`
Compress struct {
Encoding []string `json:"encoding"`
MimeTypes []string `json:"mimetypes"`
MinLength int `json:"min_length" jsonschema:"minimum=0"`
} `json:"compress"`
API struct {
ReadOnly bool `json:"read_only"`
Access struct {
@ -100,7 +105,7 @@ type Data struct {
CORS struct {
Origins []string `json:"origins"`
} `json:"cors"`
MimeTypes string `json:"mimetypes_file"`
MimeTypesFile string `json:"mimetypes_file"`
} `json:"storage"`
RTMP struct {
Enable bool `json:"enable"`
@ -259,7 +264,7 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
data.Router.BlockedPrefixes = slices.Copy(d.Router.BlockedPrefixes)
data.Router.Routes = copy.StringMap(d.Router.Routes)
data.Storage.MimeTypes = d.Storage.MimeTypes
data.Storage.MimeTypesFile = d.Storage.MimeTypes
data.Storage.CORS = d.Storage.CORS
data.Storage.CORS.Origins = slices.Copy(d.Storage.CORS.Origins)
@ -367,7 +372,7 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) {
data.TLS.CertFile = d.TLS.CertFile
data.TLS.KeyFile = d.TLS.KeyFile
data.Storage.MimeTypes = d.Storage.MimeTypes
data.Storage.MimeTypes = d.Storage.MimeTypesFile
data.Storage.CORS = d.Storage.CORS
data.Storage.CORS.Origins = slices.Copy(d.Storage.CORS.Origins)

63
go.mod
View File

@ -1,58 +1,58 @@
module github.com/datarhei/core/v16
go 1.22.0
go 1.22.5
toolchain go1.22.1
toolchain go1.23.1
require (
github.com/99designs/gqlgen v0.17.49
github.com/Masterminds/semver/v3 v3.2.1
github.com/adhocore/gronx v1.19.0
github.com/99designs/gqlgen v0.17.55
github.com/Masterminds/semver/v3 v3.3.0
github.com/adhocore/gronx v1.19.1
github.com/andybalholm/brotli v1.1.0
github.com/atrox/haikunatorgo/v2 v2.0.1
github.com/caddyserver/certmagic v0.21.3
github.com/caddyserver/certmagic v0.21.4
github.com/datarhei/gosrt v0.7.0
github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e
github.com/dolthub/swiss v0.2.1
github.com/fujiwara/shapeio v1.0.0
github.com/go-playground/validator/v10 v10.22.0
github.com/go-playground/validator/v10 v10.22.1
github.com/gobwas/glob v0.2.3
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/golang-jwt/jwt/v5 v5.2.1
github.com/google/gops v0.3.28
github.com/google/uuid v1.6.0
github.com/hashicorp/go-hclog v1.6.3
github.com/hashicorp/raft v1.7.0
github.com/hashicorp/raft v1.7.1
github.com/hashicorp/raft-boltdb/v2 v2.3.0
github.com/invopop/jsonschema v0.4.0
github.com/joho/godotenv v1.5.1
github.com/klauspost/compress v1.17.9
github.com/klauspost/compress v1.17.10
github.com/klauspost/cpuid/v2 v2.2.8
github.com/labstack/echo/v4 v4.12.0
github.com/lestrrat-go/strftime v1.0.6
github.com/lestrrat-go/strftime v1.1.0
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/mattn/go-isatty v0.0.20
github.com/minio/minio-go/v7 v7.0.75
github.com/minio/minio-go/v7 v7.0.77
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
github.com/prometheus/client_golang v1.20.0
github.com/prometheus/client_golang v1.20.4
github.com/puzpuzpuz/xsync/v3 v3.4.0
github.com/shirou/gopsutil/v3 v3.24.5
github.com/stretchr/testify v1.9.0
github.com/swaggo/echo-swagger v1.4.1
github.com/swaggo/swag v1.16.3
github.com/tklauser/go-sysconf v0.3.14
github.com/vektah/gqlparser/v2 v2.5.16
github.com/vektah/gqlparser/v2 v2.5.17
github.com/xeipuuv/gojsonschema v1.2.0
go.etcd.io/bbolt v1.3.10
go.uber.org/automaxprocs v1.5.3
go.etcd.io/bbolt v1.3.11
go.uber.org/automaxprocs v1.6.0
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.26.0
golang.org/x/mod v0.20.0
golang.org/x/crypto v0.28.0
golang.org/x/mod v0.21.0
)
require (
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/agnivade/levenshtein v1.2.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect
github.com/beorn7/perks v1.0.1 // indirect
@ -86,42 +86,41 @@ require (
github.com/labstack/gommon v0.4.2 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/libdns/libdns v0.2.2 // indirect
github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mholt/acmez/v2 v2.0.2 // indirect
github.com/mholt/acmez/v2 v2.0.3 // indirect
github.com/miekg/dns v1.1.62 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/common v0.60.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rs/xid v1.5.0 // indirect
github.com/rs/xid v1.6.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sosodev/duration v1.3.1 // indirect
github.com/swaggo/files/v2 v2.0.1 // indirect
github.com/tklauser/numcpus v0.8.0 // indirect
github.com/urfave/cli/v2 v2.27.2 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/urfave/cli/v2 v2.27.4 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zeebo/blake3 v0.2.4 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/net v0.28.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.24.0 // indirect
golang.org/x/text v0.17.0 // indirect
golang.org/x/time v0.6.0 // indirect
golang.org/x/tools v0.24.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

126
go.sum
View File

@ -1,16 +1,16 @@
github.com/99designs/gqlgen v0.17.49 h1:b3hNGexHd33fBSAd4NDT/c3NCcQzcAVkknhN9ym36YQ=
github.com/99designs/gqlgen v0.17.49/go.mod h1:tC8YFVZMed81x7UJ7ORUwXF4Kn6SXuucFqQBhN8+BU0=
github.com/99designs/gqlgen v0.17.55 h1:3vzrNWYyzSZjGDFo68e5j9sSauLxfKvLp+6ioRokVtM=
github.com/99designs/gqlgen v0.17.55/go.mod h1:3Bq768f8hgVPGZxL8aY9MaYmbxa6llPM/qu1IGH1EJo=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/PuerkitoBio/goquery v1.9.2 h1:4/wZksC3KgkQw7SQgkKotmKljk0M6V8TUvA8Wb4yPeE=
github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk=
github.com/adhocore/gronx v1.19.0 h1:GrEvNMPDwXND+YFadCyFVQPC+/xxoGJaQzu+duNf6aU=
github.com/adhocore/gronx v1.19.0/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0=
github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U=
github.com/adhocore/gronx v1.19.1 h1:S4c3uVp5jPjnk00De0lslyTenGJ4nA3Ydbkj1SbdPVc=
github.com/adhocore/gronx v1.19.1/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg=
github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@ -35,8 +35,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/caddyserver/certmagic v0.21.3 h1:pqRRry3yuB4CWBVq9+cUqu+Y6E2z8TswbhNx1AZeYm0=
github.com/caddyserver/certmagic v0.21.3/go.mod h1:Zq6pklO9nVRl3DIFUw9gVUfXKdpc/0qwTUAQMBlfgtI=
github.com/caddyserver/certmagic v0.21.4 h1:e7VobB8rffHv8ZZpSiZtEwnLDHUwLVYLWzWSa1FfKI0=
github.com/caddyserver/certmagic v0.21.4/go.mod h1:swUXjQ1T9ZtMv95qj7/InJvWLXURU85r+CfG0T+ZbDE=
github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA=
github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@ -53,8 +53,8 @@ github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e/go.mod h1:Jcw/6jZDQQ
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo=
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
github.com/dolthub/swiss v0.2.1 h1:gs2osYs5SJkAaH5/ggVJqXQxRXtWshF6uE0lgR/Y3Gw=
@ -94,8 +94,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA=
github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
@ -141,8 +141,8 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o=
github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0=
github.com/hashicorp/raft v1.7.1 h1:ytxsNx4baHsRZrhUcbt3+79zc4ly8qm7pi0393pSchY=
github.com/hashicorp/raft v1.7.1/go.mod h1:hUeiEwQQR/Nk2iKDD0dkEhklSsu3jcAcqvPzPoZSAEM=
github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ=
github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0=
github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA=
@ -159,8 +159,8 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@ -183,14 +183,14 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8=
github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is=
github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ=
github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw=
github.com/lestrrat-go/strftime v1.1.0 h1:gMESpZy44/4pXLO/m+sL0yBd1W6LjgjrrD4a68Gapyg=
github.com/lestrrat-go/strftime v1.1.0/go.mod h1:uzeIB52CeUJenCo1syghlugshMysrqUT51HlxphXVeI=
github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c=
github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y=
github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI=
github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@ -203,14 +203,14 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mholt/acmez/v2 v2.0.2 h1:OmK6xckte2JfKGPz4OAA8aNHTiLvGp8tLzmrd/wfSyw=
github.com/mholt/acmez/v2 v2.0.2/go.mod h1:fX4c9r5jYwMyMsC+7tkYRxHibkOTgta5DIFGoe67e1U=
github.com/mholt/acmez/v2 v2.0.3 h1:CgDBlEwg3QBp6s45tPQmFIBrkRIkBT4rW4orMM6p4sw=
github.com/mholt/acmez/v2 v2.0.3/go.mod h1:pQ1ysaDeGrIMvJ9dfJMk5kJNkn7L2sb3UhyrX6Q91cw=
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.75 h1:0uLrB6u6teY2Jt+cJUVi9cTvDRuBKWSRzSAcznRkwlE=
github.com/minio/minio-go/v7 v7.0.75/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw=
github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -224,8 +224,6 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@ -238,8 +236,8 @@ github.com/prep/average v0.0.0-20200506183628-d26c465f48c3/go.mod h1:0ZE5gcyWKS1
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI=
github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -247,8 +245,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@ -258,8 +256,8 @@ github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
@ -291,17 +289,17 @@ github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg=
github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8=
github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vektah/gqlparser/v2 v2.5.16 h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8=
github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww=
github.com/vektah/gqlparser/v2 v2.5.17 h1:9At7WblLV7/36nulgekUgIaqHZWn5hxqluxrxGUhOmI=
github.com/vektah/gqlparser/v2 v2.5.17/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@ -309,8 +307,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
@ -319,10 +317,10 @@ github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@ -331,14 +329,14 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -360,19 +358,19 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -17,7 +17,6 @@ type FS struct {
DefaultFile string
DefaultContentType string
Gzip bool
Filesystem fs.Filesystem

View File

@ -51,7 +51,7 @@ func TestConfigSetConflict(t *testing.T) {
router, _ := getDummyConfigRouter(t)
cfg := config.New(nil)
cfg.Storage.MimeTypes = "/path/to/mime.types"
cfg.Storage.MimeTypesFile = "/path/to/mime.types"
var data bytes.Buffer

View File

@ -15,7 +15,7 @@ func NewBrotli(level Level) Compression {
brotliLevel := brotli.DefaultCompression
if level == BestCompression {
brotliLevel = brotli.BestCompression
} else {
} else if level == BestSpeed {
brotliLevel = brotli.BestSpeed
}

View File

@ -8,8 +8,9 @@ import (
"net"
"net/http"
"strings"
"sync"
"github.com/datarhei/core/v16/mem"
"github.com/datarhei/core/v16/slices"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
@ -27,8 +28,11 @@ type Config struct {
// is used. Optional. Default value 0
MinLength int
// Schemes is a list of enabled compressiond. Optional. Default [GzipScheme, ZstdScheme]
Schemes []Scheme
// Schemes is a list of enabled compressions. Optional. Default [gzip]
Schemes []string
// List of content type to compress. If empty, everything will be compressed
ContentTypes []string
}
type Compression interface {
@ -46,6 +50,7 @@ type Compressor interface {
type compressResponseWriter struct {
Compressor
http.ResponseWriter
hasHeader bool
wroteHeader bool
wroteBody bool
minLength int
@ -54,20 +59,10 @@ type compressResponseWriter struct {
code int
headerContentLength string
scheme string
contentTypes []string
passThrough bool
}
type Scheme string
func (s Scheme) String() string {
return string(s)
}
const (
GzipScheme Scheme = "gzip"
BrotliScheme Scheme = "br"
ZstdScheme Scheme = "zstd"
)
type Level int
const (
@ -78,33 +73,11 @@ const (
// DefaultConfig is the default Gzip middleware config.
var DefaultConfig = Config{
Skipper: middleware.DefaultSkipper,
Level: DefaultCompression,
MinLength: 0,
Schemes: []Scheme{GzipScheme},
}
// ContentTypesSkipper returns a Skipper based on the list of content types
// that should be compressed. If the list is empty, all responses will be
// compressed.
func ContentTypeSkipper(contentTypes []string) middleware.Skipper {
return func(c echo.Context) bool {
// If no allowed content types are given, compress all
if len(contentTypes) == 0 {
return false
}
// Iterate through the allowed content types and don't skip if the content type matches
responseContentType := c.Response().Header().Get(echo.HeaderContentType)
for _, contentType := range contentTypes {
if strings.Contains(responseContentType, contentType) {
return false
}
}
return true
}
Skipper: middleware.DefaultSkipper,
Level: DefaultCompression,
MinLength: 0,
Schemes: []string{"gzip"},
ContentTypes: []string{},
}
// New returns a middleware which compresses HTTP response using a compression
@ -133,38 +106,40 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
config.Schemes = DefaultConfig.Schemes
}
contentTypes := slices.Copy(config.ContentTypes)
gzipEnable := false
brotliEnable := false
zstdEnable := false
for _, s := range config.Schemes {
switch s {
case GzipScheme:
case "gzip":
gzipEnable = true
case BrotliScheme:
case "br":
brotliEnable = true
case ZstdScheme:
case "zstd":
zstdEnable = true
}
}
var gzipPool Compression
var brotliPool Compression
var zstdPool Compression
var gzipCompressor Compression
var brotliCompressor Compression
var zstdCompressor Compression
if gzipEnable {
gzipPool = NewGzip(config.Level)
gzipCompressor = NewGzip(config.Level)
}
if brotliEnable {
brotliPool = NewBrotli(config.Level)
brotliCompressor = NewBrotli(config.Level)
}
if zstdEnable {
zstdPool = NewZstd(config.Level)
zstdCompressor = NewZstd(config.Level)
}
bpool := bufferPool()
bufferPool := mem.NewBufferPool()
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
@ -173,62 +148,69 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
}
res := c.Response()
res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
encodings := c.Request().Header.Get(echo.HeaderAcceptEncoding)
var pool Compression
var scheme Scheme
var compress Compression
var scheme string
if zstdEnable && strings.Contains(encodings, ZstdScheme.String()) {
pool = zstdPool
scheme = ZstdScheme
} else if brotliEnable && strings.Contains(encodings, BrotliScheme.String()) {
pool = brotliPool
scheme = BrotliScheme
} else if gzipEnable && strings.Contains(encodings, GzipScheme.String()) {
pool = gzipPool
scheme = GzipScheme
if zstdEnable && strings.Contains(encodings, "zstd") {
compress = zstdCompressor
scheme = "zstd"
} else if brotliEnable && strings.Contains(encodings, "br") {
compress = brotliCompressor
scheme = "br"
} else if gzipEnable && strings.Contains(encodings, "gzip") {
compress = gzipCompressor
scheme = "gzip"
}
if pool != nil {
w := pool.Acquire()
if w == nil {
if compress != nil {
compressor := compress.Acquire()
if compressor == nil {
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Errorf("failed to acquire compressor for %s", scheme))
}
rw := res.Writer
w.Reset(rw)
compressor.Reset(rw)
buf := bpool.Get().(*bytes.Buffer)
buf.Reset()
buffer := bufferPool.Get()
grw := &compressResponseWriter{Compressor: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf, scheme: scheme.String()}
grw := &compressResponseWriter{
Compressor: compressor,
ResponseWriter: rw,
minLength: config.MinLength,
buffer: buffer,
scheme: scheme,
contentTypes: contentTypes,
}
defer func() {
if !grw.wroteBody {
if res.Header().Get(echo.HeaderContentEncoding) == scheme.String() {
res.Header().Del(echo.HeaderContentEncoding)
}
// We have to reset response to it's pristine state when
// nothing is written to body or error is returned.
// See issue #424, #407.
res.Writer = rw
w.Reset(io.Discard)
} else if !grw.minLengthExceeded {
// If the minimum content length hasn't exceeded, write the uncompressed response
res.Writer = rw
if grw.wroteHeader {
// Restore Content-Length header in case it was deleted
if len(grw.headerContentLength) != 0 {
grw.Header().Set(echo.HeaderContentLength, grw.headerContentLength)
if !grw.passThrough {
if !grw.wroteBody {
if res.Header().Get(echo.HeaderContentEncoding) == scheme {
res.Header().Del(echo.HeaderContentEncoding)
}
grw.ResponseWriter.WriteHeader(grw.code)
// We have to reset response to it's pristine state when
// nothing is written to body or error is returned.
// See issue #424, #407.
res.Writer = rw
compressor.Reset(io.Discard)
} else if !grw.minLengthExceeded {
// If the minimum content length hasn't exceeded, write the uncompressed response
res.Writer = rw
if grw.wroteHeader {
// Restore Content-Length header in case it was deleted
if len(grw.headerContentLength) != 0 {
grw.Header().Set(echo.HeaderContentLength, grw.headerContentLength)
}
grw.ResponseWriter.WriteHeader(grw.code)
}
grw.buffer.WriteTo(rw)
compressor.Reset(io.Discard)
}
grw.buffer.WriteTo(rw)
w.Reset(io.Discard)
}
w.Close()
bpool.Put(buf)
pool.Release(w)
compressor.Close()
bufferPool.Put(buffer)
compress.Release(compressor)
}()
res.Writer = grw
@ -241,17 +223,37 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
func (w *compressResponseWriter) WriteHeader(code int) {
if code == http.StatusNoContent { // Issue #489
w.ResponseWriter.Header().Del(echo.HeaderContentEncoding)
w.Header().Del(echo.HeaderContentEncoding)
}
w.headerContentLength = w.Header().Get(echo.HeaderContentLength)
w.Header().Del(echo.HeaderContentLength) // Issue #444
w.wroteHeader = true
if !w.canCompress(w.Header().Get(echo.HeaderContentType)) {
w.passThrough = true
}
w.hasHeader = true
// Delay writing of the header until we know if we'll actually compress the response
w.code = code
}
// canCompress reports whether a response with the given content type is
// eligible for compression. An empty allow-list means every content type
// is eligible; otherwise the response content type must contain one of
// the configured entries.
func (w *compressResponseWriter) canCompress(responseContentType string) bool {
	if len(w.contentTypes) == 0 {
		return true
	}

	match := false
	for _, allowed := range w.contentTypes {
		if strings.Contains(responseContentType, allowed) {
			match = true
			break
		}
	}

	return match
}
func (w *compressResponseWriter) Write(b []byte) (int, error) {
if w.Header().Get(echo.HeaderContentType) == "" {
w.Header().Set(echo.HeaderContentType, http.DetectContentType(b))
@ -259,6 +261,18 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) {
w.wroteBody = true
if !w.hasHeader {
w.WriteHeader(http.StatusOK)
}
if w.passThrough {
if !w.wroteHeader {
w.ResponseWriter.WriteHeader(w.code)
w.wroteHeader = true
}
return w.ResponseWriter.Write(b)
}
if !w.minLengthExceeded {
n, err := w.buffer.Write(b)
@ -267,8 +281,10 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) {
// The minimum length is exceeded, add Content-Encoding header and write the header
w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806
if w.wroteHeader {
w.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
if w.hasHeader {
w.ResponseWriter.WriteHeader(w.code)
w.wroteHeader = true
}
return w.Compressor.Write(w.buffer.Bytes())
@ -281,12 +297,31 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) {
}
func (w *compressResponseWriter) Flush() {
if !w.hasHeader {
w.WriteHeader(http.StatusOK)
}
if w.passThrough {
if !w.wroteHeader {
w.ResponseWriter.WriteHeader(w.code)
w.wroteHeader = true
}
if flusher, ok := w.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
}
return
}
if !w.minLengthExceeded {
// Enforce compression
w.minLengthExceeded = true
w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806
if w.wroteHeader {
w.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
if w.hasHeader {
w.ResponseWriter.WriteHeader(w.code)
w.wroteHeader = true
}
w.Compressor.Write(w.buffer.Bytes())
@ -308,12 +343,3 @@ func (w *compressResponseWriter) Push(target string, opts *http.PushOptions) err
}
return http.ErrNotSupported
}
func bufferPool() sync.Pool {
return sync.Pool{
New: func() interface{} {
b := &bytes.Buffer{}
return b
},
}
}

View File

@ -58,15 +58,15 @@ func (rcr *nopReadCloseResetter) Reset(r io.Reader) error {
return resetter.Reset(r)
}
func getTestcases() map[Scheme]func(r io.Reader) (ReadCloseResetter, error) {
return map[Scheme]func(r io.Reader) (ReadCloseResetter, error){
GzipScheme: func(r io.Reader) (ReadCloseResetter, error) {
func getTestcases() map[string]func(r io.Reader) (ReadCloseResetter, error) {
return map[string]func(r io.Reader) (ReadCloseResetter, error){
"gzip": func(r io.Reader) (ReadCloseResetter, error) {
return gzip.NewReader(r)
},
BrotliScheme: func(r io.Reader) (ReadCloseResetter, error) {
"br": func(r io.Reader) (ReadCloseResetter, error) {
return &nopReadCloseResetter{brotli.NewReader(r)}, nil
},
ZstdScheme: func(r io.Reader) (ReadCloseResetter, error) {
"zstd": func(r io.Reader) (ReadCloseResetter, error) {
reader, err := zstd.NewReader(r)
return &nopReadCloseResetter{reader}, err
},
@ -77,18 +77,18 @@ func TestCompress(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
ctx := e.NewContext(req, rec)
// Skip if no Accept-Encoding header
h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
handler := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Write([]byte("test")) // For Content-Type sniffing
return nil
})
h(c)
handler(ctx)
assert := assert.New(t)
@ -96,15 +96,15 @@ func TestCompress(t *testing.T) {
// Compression
req = httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
c = e.NewContext(req, rec)
h(c)
assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
ctx = e.NewContext(req, rec)
handler(ctx)
assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding))
assert.Contains(rec.Header().Get(echo.HeaderContentType), echo.MIMETextPlain)
r, err := reader(rec.Body)
if assert.NoError(err) {
buf := new(bytes.Buffer)
buf := &bytes.Buffer{}
defer r.Close()
buf.ReadFrom(r)
assert.Equal("test", buf.String())
@ -112,11 +112,11 @@ func TestCompress(t *testing.T) {
// Gzip chunked
req = httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
c = e.NewContext(req, rec)
NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
ctx = e.NewContext(req, rec)
NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Header().Set("Content-Type", "text/event-stream")
c.Response().Header().Set("Transfer-Encoding", "chunked")
@ -126,7 +126,7 @@ func TestCompress(t *testing.T) {
// Read the first part of the data
assert.True(rec.Flushed)
assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding))
// Write and flush the second part of the data
c.Response().Write([]byte("tost\n"))
@ -135,7 +135,7 @@ func TestCompress(t *testing.T) {
// Write the final part of the data and return
c.Response().Write([]byte("tast"))
return nil
})(c)
})(ctx)
buf := new(bytes.Buffer)
r.Reset(rec.Body)
@ -146,14 +146,53 @@ func TestCompress(t *testing.T) {
}
}
// TestCompressWithPassthrough verifies that, when a content-type allow-list
// is configured, responses with a non-matching content type are passed
// through uncompressed while responses with a matching content type are
// compressed with the negotiated scheme.
func TestCompressWithPassthrough(t *testing.T) {
	schemes := getTestcases()

	for scheme, reader := range schemes {
		t.Run(scheme, func(t *testing.T) {
			e := echo.New()
			// Only "text/compress" responses are eligible. The 8-byte body
			// exceeds MinLength of 5, so eligible responses are compressed.
			e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []string{scheme}, ContentTypes: []string{"text/compress"}}))
			e.GET("/plain", func(c echo.Context) error {
				c.Response().Header().Set("Content-Type", "text/plain")
				c.Response().Write([]byte("testtest"))
				return nil
			})
			e.GET("/compress", func(c echo.Context) error {
				c.Response().Header().Set("Content-Type", "text/compress")
				c.Response().Write([]byte("testtest"))
				return nil
			})

			// Non-matching content type: no Content-Encoding header is set
			// and the body arrives unmodified.
			req := httptest.NewRequest(http.MethodGet, "/plain", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme)
			rec := httptest.NewRecorder()
			e.ServeHTTP(rec, req)

			assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
			assert.Contains(t, rec.Body.String(), "testtest")

			// Matching content type: the response carries the scheme in
			// Content-Encoding and decodes back to the original body.
			req = httptest.NewRequest(http.MethodGet, "/compress", nil)
			req.Header.Set(echo.HeaderAcceptEncoding, scheme)
			rec = httptest.NewRecorder()
			e.ServeHTTP(rec, req)

			assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))

			r, err := reader(rec.Body)
			if assert.NoError(t, err) {
				buf := new(bytes.Buffer)
				defer r.Close()
				buf.ReadFrom(r)
				assert.Equal(t, "testtest", buf.String())
			}
		})
	}
}
func TestCompressWithMinLength(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
// Invalid level
e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []Scheme{scheme}}))
e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []string{scheme}}))
e.GET("/", func(c echo.Context) error {
c.Response().Write([]byte("test"))
return nil
@ -163,17 +202,17 @@ func TestCompressWithMinLength(t *testing.T) {
return nil
})
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
assert.Contains(t, rec.Body.String(), "test")
req = httptest.NewRequest(http.MethodGet, "/foobar", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
r, err := reader(rec.Body)
if assert.NoError(t, err) {
buf := new(bytes.Buffer)
@ -185,17 +224,60 @@ func TestCompressWithMinLength(t *testing.T) {
}
}
// TestCompressWithAroundMinLength sweeps body sizes just below and just
// above the configured MinLength threshold (1000 bytes) and checks that
// responses shorter than the threshold are delivered uncompressed and
// byte-identical, while responses at or above it are compressed and decode
// back to the original data. The body is written in two Write calls to
// exercise the buffered threshold detection across writes.
func TestCompressWithAroundMinLength(t *testing.T) {
	schemes := getTestcases()

	minLength := 1000

	for scheme, reader := range schemes {
		// Probe 64 bytes on either side of the threshold.
		for i := minLength - 64; i < minLength+64; i++ {
			name := fmt.Sprintf("%s-%d", scheme, i)
			t.Run(name, func(t *testing.T) {
				data := rand.Bytes(i)

				e := echo.New()
				e.Use(NewWithConfig(Config{MinLength: minLength, Schemes: []string{scheme}}))
				e.GET("/", func(c echo.Context) error {
					// Split the body across two writes so the threshold is
					// crossed mid-response rather than in a single call.
					c.Response().Write(data[:1])
					c.Response().Write(data[1:])
					return nil
				})

				req := httptest.NewRequest(http.MethodGet, "/", nil)
				req.Header.Set(echo.HeaderAcceptEncoding, scheme)
				rec := httptest.NewRecorder()
				e.ServeHTTP(rec, req)

				if i < minLength {
					// Below the threshold: body must pass through unencoded.
					assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))

					res, err := io.ReadAll(rec.Body)
					if assert.NoError(t, err) {
						assert.Equal(t, data, res)
					}
				} else {
					// At or above the threshold: body must be encoded with the
					// scheme and round-trip back to the original bytes.
					assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))

					r, err := reader(rec.Body)
					if assert.NoError(t, err) {
						buf := new(bytes.Buffer)
						defer r.Close()
						buf.ReadFrom(r)
						assert.Equal(t, data, buf.Bytes())
					}
				}
			})
		}
	}
}
func TestCompressNoContent(t *testing.T) {
schemes := getTestcases()
for scheme := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
h := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
return c.NoContent(http.StatusNoContent)
})
if assert.NoError(t, h(c)) {
@ -211,17 +293,17 @@ func TestCompressEmpty(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
h := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
return c.String(http.StatusOK, "")
})
if assert.NoError(t, h(c)) {
assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
assert.Equal(t, "text/plain; charset=UTF-8", rec.Header().Get(echo.HeaderContentType))
r, err := reader(rec.Body)
if assert.NoError(t, err) {
@ -238,14 +320,14 @@ func TestCompressErrorReturned(t *testing.T) {
schemes := getTestcases()
for scheme := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
e.Use(NewWithConfig(Config{Schemes: []Scheme{scheme}}))
e.Use(NewWithConfig(Config{Schemes: []string{scheme}}))
e.GET("/", func(c echo.Context) error {
return echo.ErrNotFound
})
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, http.StatusNotFound, rec.Code)
@ -259,12 +341,12 @@ func TestCompressWithStatic(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
e.Use(NewWithConfig(Config{Schemes: []Scheme{scheme}}))
e.Use(NewWithConfig(Config{Schemes: []string{scheme}}))
e.Static("/test", "./")
req := httptest.NewRequest(http.MethodGet, "/test/compress.go", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, http.StatusOK, rec.Code)
@ -292,17 +374,17 @@ func BenchmarkCompress(b *testing.B) {
for i := 1; i <= 18; i++ {
datalen := 2 << i
data := []byte(rand.String(datalen))
data := rand.Bytes(datalen)
for scheme := range schemes {
name := fmt.Sprintf("%s-%d", scheme.String(), datalen)
name := fmt.Sprintf("%s-%d", scheme, datalen)
b.Run(name, func(b *testing.B) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
h := NewWithConfig(Config{Level: BestSpeed, Schemes: []Scheme{scheme}})(func(c echo.Context) error {
h := NewWithConfig(Config{Level: BestSpeed, Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Write(data)
return nil
})
@ -327,13 +409,13 @@ func BenchmarkCompressJSON(b *testing.B) {
schemes := getTestcases()
for scheme := range schemes {
b.Run(scheme.String(), func(b *testing.B) {
b.Run(scheme, func(b *testing.B) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
h := NewWithConfig(Config{Level: BestSpeed, Schemes: []Scheme{scheme}})(func(c echo.Context) error {
h := NewWithConfig(Config{Level: BestSpeed, Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Write(data)
return nil
})

View File

@ -0,0 +1,55 @@
package compress
import (
"compress/gzip"
"io"
"sync"
)
// gogzipImpl is a Compression backed by the standard library's
// compress/gzip package, recycling writers through a sync.Pool.
type gogzipImpl struct {
	pool sync.Pool
}

// NewGoGzip returns a gzip-based Compression for the given level. Levels
// other than BestCompression and BestSpeed map to gzip.DefaultCompression.
func NewGoGzip(level Level) Compression {
	gzipLevel := gzip.DefaultCompression
	switch level {
	case BestCompression:
		gzipLevel = gzip.BestCompression
	case BestSpeed:
		gzipLevel = gzip.BestSpeed
	}

	return &gogzipImpl{
		pool: sync.Pool{
			New: func() interface{} {
				// gzip.NewWriterLevel fails only on an invalid level; return
				// nil so Acquire can surface the failure to its caller.
				w, err := gzip.NewWriterLevel(io.Discard, gzipLevel)
				if err != nil {
					return nil
				}

				return w
			},
		},
	}
}

// Acquire takes a writer from the pool, reset and ready for use. It
// returns nil if no writer could be created.
func (g *gogzipImpl) Acquire() Compressor {
	v := g.pool.Get()
	if v == nil {
		return nil
	}

	w, ok := v.(Compressor)
	if !ok {
		return nil
	}

	w.Reset(io.Discard)

	return w
}

// Release resets the writer and hands it back to the pool for reuse. The
// caller must not use the writer afterwards.
func (g *gogzipImpl) Release(c Compressor) {
	c.Reset(io.Discard)
	g.pool.Put(c)
}

View File

@ -15,7 +15,7 @@ func NewGzip(level Level) Compression {
gzipLevel := gzip.DefaultCompression
if level == BestCompression {
gzipLevel = gzip.BestCompression
} else {
} else if level == BestSpeed {
gzipLevel = gzip.BestSpeed
}

View File

@ -15,7 +15,7 @@ func NewZstd(level Level) Compression {
zstdLevel := zstd.SpeedDefault
if level == BestCompression {
zstdLevel = zstd.SpeedBestCompression
} else {
} else if level == BestSpeed {
zstdLevel = zstd.SpeedFastest
}

View File

@ -29,7 +29,7 @@ func (h *handler) handleHLS(c echo.Context, ctxuser string, data map[string]inte
return next(c)
}
func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[string]interface{}, next echo.HandlerFunc) error {
func (h *handler) handleHLSIngress(c echo.Context, _ string, data map[string]interface{}, next echo.HandlerFunc) error {
req := c.Request()
path := req.URL.Path
@ -97,7 +97,7 @@ func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[stri
return next(c)
}
func (h *handler) handleHLSEgress(c echo.Context, ctxuser string, data map[string]interface{}, next echo.HandlerFunc) error {
func (h *handler) handleHLSEgress(c echo.Context, _ string, data map[string]interface{}, next echo.HandlerFunc) error {
req := c.Request()
res := c.Response()

View File

@ -102,12 +102,19 @@ type Config struct {
IAM iam.IAM
IAMSkipper func(ip string) bool
Resources resources.Resources
Compress CompressConfig
}
// CorsConfig holds the CORS settings for the HTTP server.
type CorsConfig struct {
	Origins []string // Origins for cross-origin requests
}
// CompressConfig holds the settings for the HTTP response compression
// middleware; the values are forwarded to the compress middleware config.
type CompressConfig struct {
	Encoding  []string // Enabled content encodings (e.g. gzip, br, zstd)
	MimeTypes []string // Content types to compress; empty compresses everything
	MinLength int      // Minimum response length in bytes before compressing
}
type server struct {
logger log.Logger
@ -143,8 +150,10 @@ type server struct {
iam echo.MiddlewareFunc
}
gzip struct {
compress struct {
encoding []string
mimetypes []string
minLength int
}
filesystems map[string]*filesystem
@ -375,15 +384,9 @@ func NewServer(config Config) (serverhandler.Server, error) {
IAM: config.IAM,
}, "/api/graph/query")
s.gzip.mimetypes = []string{
"text/plain",
"text/html",
"text/javascript",
"application/json",
//"application/x-mpegurl",
//"application/vnd.apple.mpegurl",
"image/svg+xml",
}
s.compress.encoding = config.Compress.Encoding
s.compress.mimetypes = config.Compress.MimeTypes
s.compress.minLength = config.Compress.MinLength
s.router = echo.New()
s.router.JSONSerializer = &GoJSONSerializer{}
@ -409,6 +412,13 @@ func NewServer(config Config) (serverhandler.Server, error) {
s.router.Use(s.middleware.iam)
s.router.Use(mwcompress.NewWithConfig(mwcompress.Config{
Level: mwcompress.BestSpeed,
MinLength: config.Compress.MinLength,
Schemes: config.Compress.Encoding,
ContentTypes: config.Compress.MimeTypes,
}))
s.router.Use(mwsession.NewWithConfig(mwsession.Config{
HLSIngressCollector: config.Sessions.Collector("hlsingress"),
HLSEgressCollector: config.Sessions.Collector("hls"),
@ -487,13 +497,6 @@ func (s *server) HTTPStatus() map[int]uint64 {
}
func (s *server) setRoutes() {
gzipMiddleware := mwcompress.NewWithConfig(mwcompress.Config{
Skipper: mwcompress.ContentTypeSkipper(nil),
Level: mwcompress.BestSpeed,
MinLength: 1000,
Schemes: []mwcompress.Scheme{mwcompress.GzipScheme},
})
// API router group
api := s.router.Group("/api")
@ -509,7 +512,6 @@ func (s *server) setRoutes() {
// Swagger API documentation router group
doc := s.router.Group("/api/swagger/*")
doc.Use(gzipMiddleware)
doc.GET("", echoSwagger.WrapHandler)
// Mount filesystems
@ -528,15 +530,6 @@ func (s *server) setRoutes() {
DefaultContentType: filesystem.DefaultContentType,
}))
if filesystem.Gzip {
fs.Use(mwcompress.NewWithConfig(mwcompress.Config{
Skipper: mwcompress.ContentTypeSkipper(s.gzip.mimetypes),
Level: mwcompress.BestSpeed,
MinLength: 1000,
Schemes: []mwcompress.Scheme{mwcompress.GzipScheme},
}))
}
if filesystem.Cache != nil {
mwcache := mwcache.NewWithConfig(mwcache.Config{
Cache: filesystem.Cache,
@ -590,7 +583,7 @@ func (s *server) setRoutes() {
// GraphQL
graphql := api.Group("/graph")
graphql.Use(gzipMiddleware)
//graphql.Use(gzipMiddleware)
graphql.GET("", s.handler.graph.Playground)
graphql.POST("/query", s.handler.graph.Query)
@ -598,7 +591,7 @@ func (s *server) setRoutes() {
// APIv3 router group
v3 := api.Group("/v3")
v3.Use(gzipMiddleware)
//v3.Use(gzipMiddleware)
s.setRoutesV3(v3)
}

View File

@ -15,6 +15,7 @@ import (
"github.com/datarhei/core/v16/glob"
"github.com/datarhei/core/v16/log"
"github.com/datarhei/core/v16/mem"
)
// MemConfig is the config that is required for creating
@ -126,37 +127,10 @@ func (f *memFile) free() {
f.data = nil
}
type fileDataPool struct {
pool sync.Pool
}
var pool *fileDataPool = nil
func NewFileDataPool() *fileDataPool {
p := &fileDataPool{
pool: sync.Pool{
New: func() any {
return &bytes.Buffer{}
},
},
}
return p
}
func (p *fileDataPool) Get() *bytes.Buffer {
buf := p.pool.Get().(*bytes.Buffer)
buf.Reset()
return buf
}
func (p *fileDataPool) Put(buf *bytes.Buffer) {
p.pool.Put(buf)
}
var pool *mem.BufferPool = nil
func init() {
pool = NewFileDataPool()
pool = mem.NewBufferPool()
}
type memFilesystem struct {

View File

@ -21,6 +21,12 @@ var seededRand *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))
var lock sync.Mutex
// StringWithCharset returns a random string of the given length whose
// characters are drawn from charset.
func StringWithCharset(length int, charset string) string {
	return string(BytesWithCharset(length, charset))
}
func BytesWithCharset(length int, charset string) []byte {
lock.Lock()
defer lock.Unlock()
@ -29,7 +35,7 @@ func StringWithCharset(length int, charset string) string {
b[i] = charset[seededRand.Intn(len(charset))]
}
return string(b)
return b
}
func StringLetters(length int) string {
@ -47,3 +53,7 @@ func StringAlphanumeric(length int) string {
// String returns a random string of the given length drawn from the full
// character set (CharsetAll).
func String(length int) string {
	return StringWithCharset(length, CharsetAll)
}
// Bytes returns a random byte slice of the given length drawn from the
// full character set (CharsetAll).
func Bytes(length int) []byte {
	return BytesWithCharset(length, CharsetAll)
}

33
mem/buffer.go Normal file
View File

@ -0,0 +1,33 @@
package mem
import (
"bytes"
"sync"
)
// BufferPool recycles bytes.Buffer instances to reduce allocations.
type BufferPool struct {
	pool sync.Pool
}

// NewBufferPool creates an empty pool of bytes.Buffer instances.
func NewBufferPool() *BufferPool {
	return &BufferPool{
		pool: sync.Pool{
			New: func() any {
				return &bytes.Buffer{}
			},
		},
	}
}

// Get returns a buffer from the pool, reset to zero length. The buffer
// keeps whatever capacity it accumulated in earlier use.
func (p *BufferPool) Get() *bytes.Buffer {
	b := p.pool.Get().(*bytes.Buffer)
	b.Reset()

	return b
}

// Put hands the buffer back to the pool for later reuse. The caller must
// not touch the buffer afterwards.
func (p *BufferPool) Put(buf *bytes.Buffer) {
	p.pool.Put(buf)
}

View File

@ -106,6 +106,10 @@ issues:
- path: codegen/testserver/.*/resolver\.go
linters:
- gocritic
# The interfaces are autogenerated and don't conform to the paramTypeCombine rule
- path: _examples/federation/products/graph/entity.resolvers.go
linters:
- gocritic
# Disable revive.use-any for backwards compatibility
- path: graphql/map.go
text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
@ -113,3 +117,11 @@ issues:
text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
- path: codegen/testserver/singlefile/resolver.go
text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
- path: codegen/testserver/generated_test.go
linters:
- staticcheck
text: SA1019
- path: plugin/modelgen/models_test.go
linters:
- staticcheck
text: SA1019

File diff suppressed because it is too large Load Diff

View File

@ -45,7 +45,11 @@ func Generate(cfg *config.Config, option ...Option) error {
}
}
}
plugins = append([]plugin.Plugin{federation.New(cfg.Federation.Version)}, plugins...)
federationPlugin, err := federation.New(cfg.Federation.Version, cfg)
if err != nil {
return fmt.Errorf("failed to construct the Federation plugin: %w", err)
}
plugins = append([]plugin.Plugin{federationPlugin}, plugins...)
}
for _, o := range option {
@ -58,6 +62,13 @@ func Generate(cfg *config.Config, option ...Option) error {
cfg.Sources = append(cfg.Sources, s)
}
}
if inj, ok := p.(plugin.EarlySourcesInjector); ok {
s, err := inj.InjectSourcesEarly()
if err != nil {
return fmt.Errorf("%s: %w", p.Name(), err)
}
cfg.Sources = append(cfg.Sources, s...)
}
}
if err := cfg.LoadSchema(); err != nil {
@ -70,6 +81,13 @@ func Generate(cfg *config.Config, option ...Option) error {
cfg.Sources = append(cfg.Sources, s)
}
}
if inj, ok := p.(plugin.LateSourcesInjector); ok {
s, err := inj.InjectSourcesLate(cfg.Schema)
if err != nil {
return fmt.Errorf("%s: %w", p.Name(), err)
}
cfg.Sources = append(cfg.Sources, s...)
}
}
// LoadSchema again now we have everything

View File

@ -29,19 +29,20 @@ func PrependPlugin(p plugin.Plugin) Option {
// ReplacePlugin replaces any existing plugin with a matching plugin name
func ReplacePlugin(p plugin.Plugin) Option {
return func(cfg *config.Config, plugins *[]plugin.Plugin) {
if plugins != nil {
found := false
ps := *plugins
for i, o := range ps {
if p.Name() == o.Name() {
ps[i] = p
found = true
}
}
if !found {
ps = append(ps, p)
}
*plugins = ps
if plugins == nil {
return
}
found := false
ps := *plugins
for i, o := range ps {
if p.Name() == o.Name() {
ps[i] = p
found = true
}
}
if !found {
ps = append(ps, p)
}
*plugins = ps
}
}

View File

@ -18,19 +18,20 @@ type ArgSet struct {
type FieldArgument struct {
*ast.ArgumentDefinition
TypeReference *config.TypeReference
VarName string // The name of the var in go
Object *Object // A link back to the parent object
Default any // The default value
Directives []*Directive
Value any // value set in Data
TypeReference *config.TypeReference
VarName string // The name of the var in go
Object *Object // A link back to the parent object
Default any // The default value
Directives []*Directive
Value any // value set in Data
CallArgumentDirectivesWithNull bool
}
// ImplDirectives get not Builtin and location ARGUMENT_DEFINITION directive
// ImplDirectives get not SkipRuntime and location ARGUMENT_DEFINITION directive
func (f *FieldArgument) ImplDirectives() []*Directive {
d := make([]*Directive, 0)
for i := range f.Directives {
if !f.Directives[i].Builtin && f.Directives[i].IsLocation(ast.LocationArgumentDefinition) {
if !f.Directives[i].SkipRuntime && f.Directives[i].IsLocation(ast.LocationArgumentDefinition) {
d = append(d, f.Directives[i])
}
}
@ -57,11 +58,12 @@ func (b *builder) buildArg(obj *Object, arg *ast.ArgumentDefinition) (*FieldArgu
return nil, err
}
newArg := FieldArgument{
ArgumentDefinition: arg,
TypeReference: tr,
Object: obj,
VarName: templates.ToGoPrivate(arg.Name),
Directives: argDirs,
ArgumentDefinition: arg,
TypeReference: tr,
Object: obj,
VarName: templates.ToGoPrivate(arg.Name),
Directives: argDirs,
CallArgumentDirectivesWithNull: b.Config.CallArgumentDirectivesWithNull,
}
if arg.DefaultValue != nil {

View File

@ -2,35 +2,67 @@
func (ec *executionContext) {{ $name }}(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
{{- range $i, $arg := . }}
var arg{{$i}} {{ $arg.TypeReference.GO | ref}}
if tmp, ok := rawArgs[{{$arg.Name|quote}}]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField({{$arg.Name|quote}}))
{{- if $arg.ImplDirectives }}
directive0 := func(ctx context.Context) (interface{}, error) { return ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp) }
{{ template "implDirectives" $arg }}
tmp, err = directive{{$arg.ImplDirectives|len}}(ctx)
if err != nil {
return nil, graphql.ErrorOnPath(ctx, err)
}
if data, ok := tmp.({{ $arg.TypeReference.GO | ref }}) ; ok {
arg{{$i}} = data
{{- if $arg.TypeReference.IsNilable }}
} else if tmp == nil {
arg{{$i}} = nil
{{- end }}
} else {
return nil, graphql.ErrorOnPath(ctx, fmt.Errorf(`unexpected type %T from directive, should be {{ $arg.TypeReference.GO }}`, tmp))
}
{{- else }}
arg{{$i}}, err = ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp)
if err != nil {
return nil, err
}
{{- end }}
arg{{$i}}, err := ec.{{ $name }}{{$arg.Name | go}}(ctx, rawArgs)
if err != nil {
return nil, err
}
args[{{$arg.Name|quote}}] = arg{{$i}}
{{- end }}
return args, nil
}
{{- range $i, $arg := . }}
func (ec *executionContext) {{ $name }}{{$arg.Name | go}}(
ctx context.Context,
rawArgs map[string]interface{},
) ({{ $arg.TypeReference.GO | ref}}, error) {
{{- if not .CallArgumentDirectivesWithNull}}
// We won't call the directive if the argument is null.
// Set call_argument_directives_with_null to true to call directives
// even if the argument is null.
_, ok := rawArgs[{{$arg.Name|quote}}]
if !ok {
var zeroVal {{ $arg.TypeReference.GO | ref}}
return zeroVal, nil
}
{{end}}
ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField({{$arg.Name|quote}}))
{{- if $arg.ImplDirectives }}
directive0 := func(ctx context.Context) (interface{}, error) {
tmp, ok := rawArgs[{{$arg.Name|quote}}]
if !ok {
var zeroVal {{ $arg.TypeReference.GO | ref}}
return zeroVal, nil
}
return ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp)
}
{{ template "implDirectives" $arg }}
tmp, err := directive{{$arg.ImplDirectives|len}}(ctx)
if err != nil {
var zeroVal {{ $arg.TypeReference.GO | ref}}
return zeroVal, graphql.ErrorOnPath(ctx, err)
}
if data, ok := tmp.({{ $arg.TypeReference.GO | ref }}) ; ok {
return data, nil
{{- if $arg.TypeReference.IsNilable }}
} else if tmp == nil {
var zeroVal {{ $arg.TypeReference.GO | ref}}
return zeroVal, nil
{{- end }}
} else {
var zeroVal {{ $arg.TypeReference.GO | ref}}
return zeroVal, graphql.ErrorOnPath(ctx, fmt.Errorf(`unexpected type %T from directive, should be {{ $arg.TypeReference.GO }}`, tmp))
}
{{- else }}
if tmp, ok := rawArgs[{{$arg.Name|quote}}]; ok {
return ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp)
}
var zeroVal {{ $arg.TypeReference.GO | ref}}
return zeroVal, nil
{{- end }}
}
{{end}}
{{ end }}

View File

@ -36,7 +36,7 @@ func (c *Config) NewBinder() *Binder {
}
func (b *Binder) TypePosition(typ types.Type) token.Position {
named, isNamed := typ.(*types.Named)
named, isNamed := code.Unalias(typ).(*types.Named)
if !isNamed {
return token.Position{
Filename: "unknown",
@ -77,10 +77,11 @@ func (b *Binder) FindType(pkgName, typeName string) (types.Type, error) {
return nil, err
}
if fun, isFunc := obj.(*types.Func); isFunc {
return fun.Type().(*types.Signature).Params().At(0).Type(), nil
t := code.Unalias(obj.Type())
if _, isFunc := obj.(*types.Func); isFunc {
return code.Unalias(t.(*types.Signature).Params().At(0).Type()), nil
}
return obj.Type(), nil
return t, nil
}
func (b *Binder) InstantiateType(orig types.Type, targs []types.Type) (types.Type, error) {
@ -120,7 +121,7 @@ func (b *Binder) DefaultUserObject(name string) (types.Type, error) {
return nil, err
}
return obj.Type(), nil
return code.Unalias(obj.Type()), nil
}
func (b *Binder) FindObject(pkgName, typeName string) (types.Object, error) {
@ -193,19 +194,19 @@ func (b *Binder) PointerTo(ref *TypeReference) *TypeReference {
// TypeReference is used by args and field types. The Definition can refer to both input and output types.
type TypeReference struct {
Definition *ast.Definition
GQL *ast.Type
GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target.
Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
CastType types.Type // Before calling marshalling functions cast from/to this base type
Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function
Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler
IsOmittable bool // Is the type wrapped with Omittable
IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
PointersInUmarshalInput bool // Inverse values and pointers in return.
IsRoot bool // Is the type a root level definition such as Query, Mutation or Subscription
EnumValues []EnumValueReference
Definition *ast.Definition
GQL *ast.Type
GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target.
Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
CastType types.Type // Before calling marshalling functions cast from/to this base type
Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function
Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler
IsOmittable bool // Is the type wrapped with Omittable
IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
PointersInUnmarshalInput bool // Inverse values and pointers in return.
IsRoot bool // Is the type a root level definition such as Query, Mutation or Subscription
EnumValues []EnumValueReference
}
func (ref *TypeReference) Elem() *TypeReference {
@ -264,13 +265,13 @@ func (ref *TypeReference) IsPtrToIntf() bool {
}
func (ref *TypeReference) IsNamed() bool {
_, isSlice := ref.GO.(*types.Named)
return isSlice
_, ok := ref.GO.(*types.Named)
return ok
}
func (ref *TypeReference) IsStruct() bool {
_, isStruct := ref.GO.Underlying().(*types.Struct)
return isStruct
_, ok := ref.GO.Underlying().(*types.Struct)
return ok
}
func (ref *TypeReference) IsScalar() bool {
@ -362,6 +363,9 @@ func unwrapOmittable(t types.Type) (types.Type, bool) {
}
func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) {
if bindTarget != nil {
bindTarget = code.Unalias(bindTarget)
}
if innerType, ok := unwrapOmittable(bindTarget); ok {
if schemaType.NonNull {
return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name())
@ -433,28 +437,28 @@ func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret
if err != nil {
return nil, err
}
t := code.Unalias(obj.Type())
if values := b.enumValues(def); len(values) > 0 {
err = b.enumReference(ref, obj, values)
if err != nil {
return nil, err
}
} else if fun, isFunc := obj.(*types.Func); isFunc {
ref.GO = fun.Type().(*types.Signature).Params().At(0).Type()
ref.IsContext = fun.Type().(*types.Signature).Results().At(0).Type().String() == "github.com/99designs/gqlgen/graphql.ContextMarshaler"
ref.GO = code.Unalias(t.(*types.Signature).Params().At(0).Type())
ref.IsContext = code.Unalias(t.(*types.Signature).Results().At(0).Type()).String() == "github.com/99designs/gqlgen/graphql.ContextMarshaler"
ref.Marshaler = fun
ref.Unmarshaler = types.NewFunc(0, fun.Pkg(), "Unmarshal"+typeName, nil)
} else if hasMethod(obj.Type(), "MarshalGQLContext") && hasMethod(obj.Type(), "UnmarshalGQLContext") {
ref.GO = obj.Type()
} else if hasMethod(t, "MarshalGQLContext") && hasMethod(t, "UnmarshalGQLContext") {
ref.GO = t
ref.IsContext = true
ref.IsMarshaler = true
} else if hasMethod(obj.Type(), "MarshalGQL") && hasMethod(obj.Type(), "UnmarshalGQL") {
ref.GO = obj.Type()
} else if hasMethod(t, "MarshalGQL") && hasMethod(t, "UnmarshalGQL") {
ref.GO = t
ref.IsMarshaler = true
} else if underlying := basicUnderlying(obj.Type()); def.IsLeafType() && underlying != nil && underlying.Kind() == types.String {
} else if underlying := basicUnderlying(t); def.IsLeafType() && underlying != nil && underlying.Kind() == types.String {
// TODO delete before v1. Backwards compatibility case for named types wrapping strings (see #595)
ref.GO = obj.Type()
ref.GO = t
ref.CastType = underlying
underlyingRef, err := b.TypeReference(&ast.Type{NamedType: "String"}, nil)
@ -465,7 +469,7 @@ func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret
ref.Marshaler = underlyingRef.Marshaler
ref.Unmarshaler = underlyingRef.Unmarshaler
} else {
ref.GO = obj.Type()
ref.GO = t
}
ref.Target = ref.GO
@ -478,7 +482,7 @@ func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret
ref.GO = bindTarget
}
ref.PointersInUmarshalInput = b.cfg.ReturnPointersInUmarshalInput
ref.PointersInUnmarshalInput = b.cfg.ReturnPointersInUnmarshalInput
return ref, nil
}
@ -516,6 +520,10 @@ func (b *Binder) CopyModifiersFromAst(t *ast.Type, base types.Type) types.Type {
}
func IsNilable(t types.Type) bool {
// Note that we use types.Unalias rather than code.Unalias here
// because we want to always check the underlying type.
// code.Unalias only unwraps aliases in Go 1.23
t = types.Unalias(t)
if namedType, isNamed := t.(*types.Named); isNamed {
return IsNilable(namedType.Underlying())
}
@ -587,10 +595,11 @@ func (b *Binder) enumReference(ref *TypeReference, obj types.Object, values map[
return fmt.Errorf("not all enum values are binded for %v", ref.Definition.Name)
}
if fn, ok := obj.Type().(*types.Signature); ok {
ref.GO = fn.Params().At(0).Type()
t := code.Unalias(obj.Type())
if fn, ok := t.(*types.Signature); ok {
ref.GO = code.Unalias(fn.Params().At(0).Type())
} else {
ref.GO = obj.Type()
ref.GO = t
}
str, err := b.TypeReference(&ast.Type{NamedType: "String"}, nil)
@ -618,9 +627,10 @@ func (b *Binder) enumReference(ref *TypeReference, obj types.Object, values map[
return err
}
if !types.AssignableTo(valueObj.Type(), ref.GO) {
valueTyp := code.Unalias(valueObj.Type())
if !types.AssignableTo(valueTyp, ref.GO) {
return fmt.Errorf("wrong type: %v, for enum value: %v, expected type: %v, of enum: %v",
valueObj.Type(), value.Name, ref.GO, ref.Definition.Name)
valueTyp, value.Name, ref.GO, ref.Definition.Name)
}
switch valueObj.(type) {

View File

@ -42,16 +42,22 @@ type Config struct {
OmitRootModels bool `yaml:"omit_root_models,omitempty"`
OmitResolverFields bool `yaml:"omit_resolver_fields,omitempty"`
OmitPanicHandler bool `yaml:"omit_panic_handler,omitempty"`
StructFieldsAlwaysPointers bool `yaml:"struct_fields_always_pointers,omitempty"`
ReturnPointersInUmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"`
ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"`
NullableInputOmittable bool `yaml:"nullable_input_omittable,omitempty"`
EnableModelJsonOmitemptyTag *bool `yaml:"enable_model_json_omitempty_tag,omitempty"`
SkipValidation bool `yaml:"skip_validation,omitempty"`
SkipModTidy bool `yaml:"skip_mod_tidy,omitempty"`
Sources []*ast.Source `yaml:"-"`
Packages *code.Packages `yaml:"-"`
Schema *ast.Schema `yaml:"-"`
// If this is set to true, argument directives that
// decorate a field with a null value will still be called.
//
// This enables argumment directives to not just mutate
// argument values but to set them even if they're null.
CallArgumentDirectivesWithNull bool `yaml:"call_argument_directives_with_null,omitempty"`
StructFieldsAlwaysPointers bool `yaml:"struct_fields_always_pointers,omitempty"`
ReturnPointersInUnmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"`
ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"`
NullableInputOmittable bool `yaml:"nullable_input_omittable,omitempty"`
EnableModelJsonOmitemptyTag *bool `yaml:"enable_model_json_omitempty_tag,omitempty"`
SkipValidation bool `yaml:"skip_validation,omitempty"`
SkipModTidy bool `yaml:"skip_mod_tidy,omitempty"`
Sources []*ast.Source `yaml:"-"`
Packages *code.Packages `yaml:"-"`
Schema *ast.Schema `yaml:"-"`
// Deprecated: use Federation instead. Will be removed next release
Federated bool `yaml:"federated,omitempty"`
@ -62,15 +68,15 @@ var cfgFilenames = []string{".gqlgen.yml", "gqlgen.yml", "gqlgen.yaml"}
// DefaultConfig creates a copy of the default config
func DefaultConfig() *Config {
return &Config{
SchemaFilename: StringList{"schema.graphql"},
Model: PackageConfig{Filename: "models_gen.go"},
Exec: ExecConfig{Filename: "generated.go"},
Directives: map[string]DirectiveConfig{},
Models: TypeMap{},
StructFieldsAlwaysPointers: true,
ReturnPointersInUmarshalInput: false,
ResolversAlwaysReturnPointers: true,
NullableInputOmittable: false,
SchemaFilename: StringList{"schema.graphql"},
Model: PackageConfig{Filename: "models_gen.go"},
Exec: ExecConfig{Filename: "generated.go"},
Directives: map[string]DirectiveConfig{},
Models: TypeMap{},
StructFieldsAlwaysPointers: true,
ReturnPointersInUnmarshalInput: false,
ResolversAlwaysReturnPointers: true,
NullableInputOmittable: false,
}
}
@ -320,24 +326,33 @@ func (c *Config) injectTypesFromSchema() error {
}
}
if schemaType.Kind == ast.Object || schemaType.Kind == ast.InputObject {
if schemaType.Kind == ast.Object ||
schemaType.Kind == ast.InputObject ||
schemaType.Kind == ast.Interface {
for _, field := range schemaType.Fields {
if fd := field.Directives.ForName("goField"); fd != nil {
forceResolver := c.Models[schemaType.Name].Fields[field.Name].Resolver
fieldName := c.Models[schemaType.Name].Fields[field.Name].FieldName
if ra := fd.Arguments.ForName("forceResolver"); ra != nil {
if fr, err := ra.Value.Value(nil); err == nil {
forceResolver = fr.(bool)
}
}
fieldName := c.Models[schemaType.Name].Fields[field.Name].FieldName
if na := fd.Arguments.ForName("name"); na != nil {
if fr, err := na.Value.Value(nil); err == nil {
fieldName = fr.(string)
}
}
omittable := c.Models[schemaType.Name].Fields[field.Name].Omittable
if arg := fd.Arguments.ForName("omittable"); arg != nil {
if k, err := arg.Value.Value(nil); err == nil {
val := k.(bool)
omittable = &val
}
}
if c.Models[schemaType.Name].Fields == nil {
c.Models[schemaType.Name] = TypeMapEntry{
Model: c.Models[schemaType.Name].Model,
@ -349,6 +364,7 @@ func (c *Config) injectTypesFromSchema() error {
c.Models[schemaType.Name].Fields[field.Name] = TypeMapField{
FieldName: fieldName,
Resolver: forceResolver,
Omittable: omittable,
}
}
}
@ -449,6 +465,7 @@ type TypeMapEntry struct {
type TypeMapField struct {
Resolver bool `yaml:"resolver"`
FieldName string `yaml:"fieldName"`
Omittable *bool `yaml:"omittable"`
GeneratedMethod string `yaml:"-"`
}
@ -659,6 +676,16 @@ func (tm TypeMap) ForceGenerate(name string, forceGenerate bool) {
type DirectiveConfig struct {
SkipRuntime bool `yaml:"skip_runtime"`
// If the directive implementation is statically defined, don't provide a hook for it
// in the generated server. This is useful for directives that are implemented
// by plugins or the runtime itself.
//
// The function implemmentation should be provided here as a string.
//
// The function should have the following signature:
// func(ctx context.Context, obj any, next graphql.Resolver[, directive arguments if any]) (res any, err error)
Implementation *string
}
func inStrSlice(haystack []string, needle string) bool {

View File

@ -14,7 +14,7 @@ type GoInitialismsConfig struct {
Initialisms []string `yaml:"initialisms"`
}
// setInitialisms adjustes GetInitialisms based on its settings.
// setInitialisms adjusts GetInitialisms based on its settings.
func (i GoInitialismsConfig) setInitialisms() {
toUse := i.determineGoInitialisms()
templates.GetInitialisms = func() map[string]bool {
@ -22,7 +22,7 @@ func (i GoInitialismsConfig) setInitialisms() {
}
}
// determineGoInitialisms returns the Go initialims to be used, based on its settings.
// determineGoInitialisms returns the Go initialisms to be used, based on its settings.
func (i GoInitialismsConfig) determineGoInitialisms() (initialismsToUse map[string]bool) {
if i.ReplaceDefaults {
initialismsToUse = make(map[string]bool, len(i.Initialisms))

View File

@ -64,6 +64,30 @@ type builder struct {
Directives map[string]*Directive
}
// Get only the directives which should have a user provided definition on server instantiation
func (d *Data) UserDirectives() DirectiveList {
res := DirectiveList{}
directives := d.Directives()
for k, directive := range directives {
if directive.Implementation == nil {
res[k] = directive
}
}
return res
}
// Get only the directives which should have a statically provided definition
func (d *Data) BuiltInDirectives() DirectiveList {
res := DirectiveList{}
directives := d.Directives()
for k, directive := range directives {
if directive.Implementation != nil {
res[k] = directive
}
}
return res
}
// Get only the directives which are defined in the config's sources.
func (d *Data) Directives() DirectiveList {
res := DirectiveList{}
@ -97,7 +121,7 @@ func BuildData(cfg *config.Config, plugins ...any) (*Data, error) {
dataDirectives := make(map[string]*Directive)
for name, d := range b.Directives {
if !d.Builtin {
if !d.SkipRuntime {
dataDirectives[name] = d
}
}

View File

@ -7,6 +7,7 @@ import (
"github.com/vektah/gqlparser/v2/ast"
"github.com/99designs/gqlgen/codegen/config"
"github.com/99designs/gqlgen/codegen/templates"
)
@ -19,9 +20,10 @@ func (dl DirectiveList) LocationDirectives(location string) DirectiveList {
type Directive struct {
*ast.DirectiveDefinition
Name string
Args []*FieldArgument
Builtin bool
Name string
Args []*FieldArgument
config.DirectiveConfig
}
// IsLocation check location directive
@ -82,7 +84,7 @@ func (b *builder) buildDirectives() (map[string]*Directive, error) {
DirectiveDefinition: dir,
Name: name,
Args: args,
Builtin: b.Config.Directives[name].SkipRuntime,
DirectiveConfig: b.Config.Directives[name],
}
}
@ -122,7 +124,7 @@ func (b *builder) getDirectives(list ast.DirectiveList) ([]*Directive, error) {
Name: d.Name,
Args: args,
DirectiveDefinition: list[i].Definition,
Builtin: b.Config.Directives[d.Name].SkipRuntime,
DirectiveConfig: b.Config.Directives[d.Name],
}
}
@ -162,8 +164,12 @@ func (d *Directive) ResolveArgs(obj string, next int) string {
return strings.Join(args, ", ")
}
func (d *Directive) CallName() string {
return ucFirst(d.Name)
}
func (d *Directive) Declaration() string {
res := ucFirst(d.Name) + " func(ctx context.Context, obj interface{}, next graphql.Resolver"
res := d.CallName() + " func(ctx context.Context, obj interface{}, next graphql.Resolver"
for _, arg := range d.Args {
res += fmt.Sprintf(", %s %s", templates.ToGoPrivate(arg.Name), templates.CurrentImports.LookupType(arg.TypeReference.GO))
@ -172,3 +178,23 @@ func (d *Directive) Declaration() string {
res += ") (res interface{}, err error)"
return res
}
func (d *Directive) IsBuiltIn() bool {
return d.Implementation != nil
}
func (d *Directive) CallPath() string {
if d.IsBuiltIn() {
return "builtInDirective" + d.CallName()
}
return "ec.directives." + d.CallName()
}
func (d *Directive) FunctionImpl() string {
if d.Implementation == nil {
return ""
}
return d.CallPath() + " = " + *d.Implementation
}

View File

@ -1,23 +1,29 @@
{{ define "implDirectives" }}{{ $in := .DirectiveObjName }}
{{ $zeroVal := .TypeReference.GO | ref}}
{{- range $i, $directive := .ImplDirectives -}}
directive{{add $i 1}} := func(ctx context.Context) (interface{}, error) {
{{- range $arg := $directive.Args }}
{{- if notNil "Value" $arg }}
{{ $arg.VarName }}, err := ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, {{ $arg.Value | dump }})
if err != nil{
return nil, err
var zeroVal {{$zeroVal}}
return zeroVal, err
}
{{- else if notNil "Default" $arg }}
{{ $arg.VarName }}, err := ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, {{ $arg.Default | dump }})
if err != nil{
return nil, err
var zeroVal {{$zeroVal}}
return zeroVal, err
}
{{- end }}
{{- end }}
if ec.directives.{{$directive.Name|ucFirst}} == nil {
return nil, errors.New("directive {{$directive.Name}} is not implemented")
}
return ec.directives.{{$directive.Name|ucFirst}}({{$directive.ResolveArgs $in $i }})
{{- if not $directive.IsBuiltIn}}
if {{$directive.CallPath}} == nil {
var zeroVal {{$zeroVal}}
return zeroVal, errors.New("directive {{$directive.Name}} is not implemented")
}
{{- end}}
return {{$directive.CallPath}}({{$directive.ResolveArgs $in $i }})
}
{{ end -}}
{{ end }}
@ -37,10 +43,7 @@
{{- end }}
n := next
next = func(ctx context.Context) (interface{}, error) {
if ec.directives.{{$directive.Name|ucFirst}} == nil {
return nil, errors.New("directive {{$directive.Name}} is not implemented")
}
return ec.directives.{{$directive.Name|ucFirst}}({{$directive.CallArgs}})
{{- template "callDirective" $directive -}}
}
{{- end }}
}
@ -57,6 +60,15 @@
return graphql.Null
{{end}}
{{define "callDirective"}}
{{- if not .IsBuiltIn}}
if {{.CallPath}} == nil {
return nil, errors.New("directive {{.Name}} is not implemented")
}
{{- end}}
return {{.CallPath}}({{.CallArgs}})
{{end}}
{{ if .Directives.LocationDirectives "QUERY" }}
func (ec *executionContext) _queryMiddleware(ctx context.Context, obj *ast.OperationDefinition, next func(ctx context.Context) (interface{}, error)) graphql.Marshaler {
{{ template "queryDirectives" .Directives.LocationDirectives "QUERY" }}
@ -87,10 +99,7 @@ func (ec *executionContext) _subscriptionMiddleware(ctx context.Context, obj *as
{{- end }}
n := next
next = func(ctx context.Context) (interface{}, error) {
if ec.directives.{{$directive.Name|ucFirst}} == nil {
return nil, errors.New("directive {{$directive.Name}} is not implemented")
}
return ec.directives.{{$directive.Name|ucFirst}}({{$directive.CallArgs}})
{{- template "callDirective" $directive -}}
}
{{- end }}
}
@ -130,10 +139,7 @@ func (ec *executionContext) _subscriptionMiddleware(ctx context.Context, obj *as
{{- end }}
n := next
next = func(ctx context.Context) (interface{}, error) {
if ec.directives.{{$directive.Name|ucFirst}} == nil {
return nil, errors.New("directive {{$directive.Name}} is not implemented")
}
return ec.directives.{{$directive.Name|ucFirst}}({{$directive.CallArgs}})
{{- template "callDirective" $directive -}}
}
{{- end }}
}

View File

@ -16,6 +16,7 @@ import (
"github.com/99designs/gqlgen/codegen/config"
"github.com/99designs/gqlgen/codegen/templates"
"github.com/99designs/gqlgen/internal/code"
)
type Field struct {
@ -144,7 +145,7 @@ func (b *builder) bindField(obj *Object, f *Field) (errret error) {
f.GoFieldName = b.Config.Models[obj.Name].Fields[f.Name].FieldName
}
target, err := b.findBindTarget(obj.Type.(*types.Named), f.GoFieldName)
target, err := b.findBindTarget(obj.Type, f.GoFieldName)
if err != nil {
return err
}
@ -229,7 +230,7 @@ func (b *builder) bindField(obj *Object, f *Field) (errret error) {
}
// findBindTarget attempts to match the name to a field or method on a Type
// with the following priorites:
// with the following priorities:
// 1. Any Fields with a struct tag (see config.StructTag). Errors if more than one match is found
// 2. Any method or field with a matching name. Errors if more than one match is found
// 3. Same logic again for embedded fields
@ -380,7 +381,7 @@ func (b *builder) findBindStructEmbedsTarget(strukt *types.Struct, name string)
continue
}
fieldType := field.Type()
fieldType := code.Unalias(field.Type())
if ptr, ok := fieldType.(*types.Pointer); ok {
fieldType = ptr.Elem()
}
@ -442,7 +443,7 @@ func (f *Field) ImplDirectives() []*Directive {
loc = ast.LocationInputFieldDefinition
}
for i := range f.Directives {
if !f.Directives[i].Builtin &&
if !f.Directives[i].SkipRuntime &&
(f.Directives[i].IsLocation(loc, ast.LocationObject) || f.Directives[i].IsLocation(loc, ast.LocationInputObject)) {
d = append(d, f.Directives[i])
}

View File

@ -1,3 +1,4 @@
{{/* Context object: codegen.Data */}}
{{ reserveImport "context" }}
{{ reserveImport "fmt" }}
{{ reserveImport "io" }}
@ -46,7 +47,7 @@
}
type DirectiveRoot struct {
{{ range $directive := .Directives }}
{{ range $directive := .UserDirectives }}
{{- $directive.Declaration }}
{{ end }}
}
@ -93,6 +94,12 @@
{{- end }}
{{- end }}
{{ range $directive := .BuiltInDirectives }}
var (
{{- $directive.FunctionImpl }}
)
{{ end }}
{{ if eq .Config.Exec.Layout "single-file" }}
type executableSchema struct {
schema *ast.Schema

View File

@ -1,10 +1,10 @@
{{- range $input := .Inputs }}
{{- if not .HasUnmarshal }}
{{- $it := "it" }}
{{- if .PointersInUmarshalInput }}
{{- if .PointersInUnmarshalInput }}
{{- $it = "&it" }}
{{- end }}
func (ec *executionContext) unmarshalInput{{ .Name }}(ctx context.Context, obj interface{}) ({{ if .PointersInUmarshalInput }}*{{ end }}{{.Type | ref}}, error) {
func (ec *executionContext) unmarshalInput{{ .Name }}(ctx context.Context, obj interface{}) ({{ if .PointersInUnmarshalInput }}*{{ end }}{{.Type | ref}}, error) {
{{- if $input.IsMap }}
it := make(map[string]interface{}, len(obj.(map[string]interface{})))
{{- else }}

View File

@ -26,15 +26,15 @@ const (
type Object struct {
*ast.Definition
Type types.Type
ResolverInterface types.Type
Root bool
Fields []*Field
Implements []*ast.Definition
DisableConcurrency bool
Stream bool
Directives []*Directive
PointersInUmarshalInput bool
Type types.Type
ResolverInterface types.Type
Root bool
Fields []*Field
Implements []*ast.Definition
DisableConcurrency bool
Stream bool
Directives []*Directive
PointersInUnmarshalInput bool
}
func (b *builder) buildObject(typ *ast.Definition) (*Object, error) {
@ -44,12 +44,12 @@ func (b *builder) buildObject(typ *ast.Definition) (*Object, error) {
}
caser := cases.Title(language.English, cases.NoLower)
obj := &Object{
Definition: typ,
Root: b.Config.IsRoot(typ),
DisableConcurrency: typ == b.Schema.Mutation,
Stream: typ == b.Schema.Subscription,
Directives: dirs,
PointersInUmarshalInput: b.Config.ReturnPointersInUmarshalInput,
Definition: typ,
Root: b.Config.IsRoot(typ),
DisableConcurrency: typ == b.Schema.Mutation,
Stream: typ == b.Schema.Subscription,
Directives: dirs,
PointersInUnmarshalInput: b.Config.ReturnPointersInUnmarshalInput,
ResolverInterface: types.NewNamed(
types.NewTypeName(0, b.Config.Exec.Pkg(), caser.String(typ.Name)+"Resolver", nil),
nil,

View File

@ -1,3 +1,4 @@
{{/* Context object: codegen.Data */}}
{{ reserveImport "context" }}
{{ reserveImport "fmt" }}
{{ reserveImport "io" }}
@ -45,7 +46,7 @@ type ResolverRoot interface {
}
type DirectiveRoot struct {
{{ range $directive := .Directives }}
{{ range $directive := .UserDirectives }}
{{- $directive.Declaration }}
{{ end }}
}
@ -67,6 +68,12 @@ type ComplexityRoot struct {
{{- end }}
}
{{ range $directive := .BuiltInDirectives }}
var (
{{- $directive.FunctionImpl }}
)
{{ end }}
type executableSchema struct {
schema *ast.Schema
resolvers ResolverRoot

View File

@ -495,18 +495,18 @@ func wordWalker(str string, f func(*wordInfo)) {
if initialisms[upperWord] {
// If the uppercase word (string(runes[w:i]) is "ID" or "IP"
// AND
// the word is the first two characters of the str
// the word is the first two characters of the current word
// AND
// that is not the end of the word
// AND
// the length of the string is greater than 3
// the length of the remaining string is greater than 3
// AND
// the third rune is an uppercase one
// THEN
// do NOT count this as an initialism.
switch upperWord {
case "ID", "IP":
if word == str[:2] && !eow && len(str) > 3 && unicode.IsUpper(runes[3]) {
if remainingRunes := runes[w:]; word == string(remainingRunes[:2]) && !eow && len(remainingRunes) > 3 && unicode.IsUpper(remainingRunes[3]) {
continue
}
}
@ -694,7 +694,7 @@ var pkgReplacer = strings.NewReplacer(
func TypeIdentifier(t types.Type) string {
res := ""
for {
switch it := t.(type) {
switch it := code.Unalias(t).(type) {
case *types.Pointer:
t.Underlying()
res += "ᚖ"
@ -771,6 +771,8 @@ var CommonInitialisms = map[string]bool{
"XMPP": true,
"XSRF": true,
"XSS": true,
"AWS": true,
"GCP": true,
}
// GetInitialisms returns the initialisms to capitalize in Go names. If unchanged, default initialisms will be returned

View File

@ -76,9 +76,9 @@
return res, graphql.ErrorOnPath(ctx, err)
{{- else }}
res, err := ec.unmarshalInput{{ $type.GQL.Name }}(ctx, v)
{{- if and $type.IsNilable (not $type.IsMap) (not $type.PointersInUmarshalInput) }}
{{- if and $type.IsNilable (not $type.IsMap) (not $type.PointersInUnmarshalInput) }}
return &res, graphql.ErrorOnPath(ctx, err)
{{- else if and (not $type.IsNilable) $type.PointersInUmarshalInput }}
{{- else if and (not $type.IsNilable) $type.PointersInUnmarshalInput }}
return *res, graphql.ErrorOnPath(ctx, err)
{{- else }}
return res, graphql.ErrorOnPath(ctx, err)

View File

@ -20,6 +20,8 @@ func UnmarshalBoolean(v any) (bool, error) {
return v != 0, nil
case bool:
return v, nil
case nil:
return false, nil
default:
return false, fmt.Errorf("%T is not a bool", v)
}

View File

@ -3,27 +3,29 @@ package graphql
import "context"
// Cache is a shared store for APQ and query AST caching
type Cache interface {
type Cache[T any] interface {
// Get looks up a key's value from the cache.
Get(ctx context.Context, key string) (value any, ok bool)
Get(ctx context.Context, key string) (value T, ok bool)
// Add adds a value to the cache.
Add(ctx context.Context, key string, value any)
Add(ctx context.Context, key string, value T)
}
// MapCache is the simplest implementation of a cache, because it can not evict it should only be used in tests
type MapCache map[string]any
type MapCache[T any] map[string]T
// Get looks up a key's value from the cache.
func (m MapCache) Get(_ context.Context, key string) (value any, ok bool) {
func (m MapCache[T]) Get(_ context.Context, key string) (value T, ok bool) {
v, ok := m[key]
return v, ok
}
// Add adds a value to the cache.
func (m MapCache) Add(_ context.Context, key string, value any) { m[key] = value }
func (m MapCache[T]) Add(_ context.Context, key string, value T) { m[key] = value }
type NoCache struct{}
type NoCache[T any, T2 *T] struct{}
func (n NoCache) Get(_ context.Context, _ string) (value any, ok bool) { return nil, false }
func (n NoCache) Add(_ context.Context, _ string, _ any) {}
var _ Cache[*string] = (*NoCache[string, *string])(nil)
func (n NoCache[T, T2]) Get(_ context.Context, _ string) (value T2, ok bool) { return nil, false }
func (n NoCache[T, T2]) Add(_ context.Context, _ string, _ T2) {}

View File

@ -22,7 +22,7 @@ type Executor struct {
errorPresenter graphql.ErrorPresenterFunc
recoverFunc graphql.RecoverFunc
queryCache graphql.Cache
queryCache graphql.Cache[*ast.QueryDocument]
parserTokenLimit int
}
@ -36,7 +36,7 @@ func New(es graphql.ExecutableSchema) *Executor {
es: es,
errorPresenter: graphql.DefaultErrorPresenter,
recoverFunc: graphql.DefaultRecover,
queryCache: graphql.NoCache{},
queryCache: graphql.NoCache[ast.QueryDocument, *ast.QueryDocument]{},
ext: processExtensions(nil),
parserTokenLimit: parserTokenNoLimit,
}
@ -84,7 +84,6 @@ func (e *Executor) CreateOperationContext(
var err error
rc.Variables, err = validator.VariableValues(e.es.Schema(), rc.Operation, params.Variables)
if err != nil {
gqlErr, ok := err.(*gqlerror.Error)
if ok {
@ -162,7 +161,7 @@ func (e *Executor) PresentRecoveredError(ctx context.Context, err any) error {
return e.errorPresenter(ctx, e.recoverFunc(ctx, err))
}
func (e *Executor) SetQueryCache(cache graphql.Cache) {
func (e *Executor) SetQueryCache(cache graphql.Cache[*ast.QueryDocument]) {
e.queryCache = cache
}
@ -195,7 +194,7 @@ func (e *Executor) parseQuery(
stats.Parsing.End = now
stats.Validation.Start = now
return doc.(*ast.QueryDocument), nil
return doc, nil
}
doc, err := parser.ParseQueryWithTokenLimit(&ast.Source{Input: query}, e.parserTokenLimit)

View File

@ -28,6 +28,8 @@ func UnmarshalFloat(v any) (float64, error) {
return v, nil
case json.Number:
return strconv.ParseFloat(string(v), 64)
case nil:
return 0, nil
default:
return 0, fmt.Errorf("%T is not an float", v)
}

View File

@ -23,7 +23,7 @@ const (
// hash in the next request.
// see https://github.com/apollographql/apollo-link-persisted-queries
type AutomaticPersistedQuery struct {
Cache graphql.Cache
Cache graphql.Cache[string]
}
type ApqStats struct {
@ -72,14 +72,14 @@ func (a AutomaticPersistedQuery) MutateOperationParameters(ctx context.Context,
fullQuery := false
if rawParams.Query == "" {
var ok bool
// client sent optimistic query hash without query string, get it from the cache
query, ok := a.Cache.Get(ctx, extension.Sha256)
rawParams.Query, ok = a.Cache.Get(ctx, extension.Sha256)
if !ok {
err := gqlerror.Errorf(errPersistedQueryNotFound)
errcode.Set(err, errPersistedQueryNotFoundCode)
return err
}
rawParams.Query = query.(string)
} else {
// client sent optimistic query hash with query string, verify and store it
if computeQueryHash(rawParams.Query) != extension.Sha256 {

View File

@ -8,26 +8,26 @@ import (
"github.com/99designs/gqlgen/graphql"
)
type LRU struct {
lru *lru.Cache[string, any]
type LRU[T any] struct {
lru *lru.Cache[string, T]
}
var _ graphql.Cache = &LRU{}
var _ graphql.Cache[any] = &LRU[any]{}
func New(size int) *LRU {
cache, err := lru.New[string, any](size)
func New[T any](size int) *LRU[T] {
cache, err := lru.New[string, T](size)
if err != nil {
// An error is only returned for non-positive cache size
// and we already checked for that.
panic("unexpected error creating cache: " + err.Error())
}
return &LRU{cache}
return &LRU[T]{cache}
}
func (l LRU) Get(ctx context.Context, key string) (value any, ok bool) {
func (l LRU[T]) Get(ctx context.Context, key string) (value T, ok bool) {
return l.lru.Get(key)
}
func (l LRU) Add(ctx context.Context, key string, value any) {
func (l LRU[T]) Add(ctx context.Context, key string, value T) {
l.lru.Add(key, value)
}

View File

@ -8,6 +8,7 @@ import (
"net/http"
"time"
"github.com/vektah/gqlparser/v2/ast"
"github.com/vektah/gqlparser/v2/gqlerror"
"github.com/99designs/gqlgen/graphql"
@ -41,11 +42,11 @@ func NewDefaultServer(es graphql.ExecutableSchema) *Server {
srv.AddTransport(transport.POST{})
srv.AddTransport(transport.MultipartForm{})
srv.SetQueryCache(lru.New(1000))
srv.SetQueryCache(lru.New[*ast.QueryDocument](1000))
srv.Use(extension.Introspection{})
srv.Use(extension.AutomaticPersistedQuery{
Cache: lru.New(100),
Cache: lru.New[string](100),
})
return srv
@ -63,7 +64,7 @@ func (s *Server) SetRecoverFunc(f graphql.RecoverFunc) {
s.exec.SetRecoverFunc(f)
}
func (s *Server) SetQueryCache(cache graphql.Cache) {
func (s *Server) SetQueryCache(cache graphql.Cache[*ast.QueryDocument]) {
s.exec.SetQueryCache(cache)
}

View File

@ -88,7 +88,6 @@ func cleanupBody(body string) (out string, err error) {
// is where query starts. If it is, query is url encoded.
if strings.HasPrefix(body, "%7B") {
body, err = url.QueryUnescape(body)
if err != nil {
return body, err
}

View File

@ -198,7 +198,7 @@ func (c *wsConnection) init() bool {
var ctx context.Context
ctx, initAckPayload, err = c.InitFunc(c.ctx, c.initPayload)
if err != nil {
c.sendConnectionError(err.Error())
c.sendConnectionError("%s", err.Error())
c.close(websocket.CloseNormalClosure, "terminated")
return false
}
@ -239,7 +239,6 @@ func (c *wsConnection) run() {
ctx, cancel := context.WithCancel(c.ctx)
defer func() {
cancel()
c.close(websocket.CloseAbnormalClosure, "unexpected closure")
}()
// If we're running in graphql-ws mode, create a timer that will trigger a
@ -369,7 +368,7 @@ func (c *wsConnection) closeOnCancel(ctx context.Context) {
<-ctx.Done()
if r := closeReasonForContext(ctx); r != "" {
c.sendConnectionError(r)
c.sendConnectionError("%s", r)
}
c.close(websocket.CloseNormalClosure, "terminated")
}

View File

@ -23,6 +23,8 @@ func UnmarshalInt(v any) (int, error) {
return int(v), nil
case json.Number:
return strconv.Atoi(string(v))
case nil:
return 0, nil
default:
return 0, fmt.Errorf("%T is not an int", v)
}
@ -44,6 +46,8 @@ func UnmarshalInt64(v any) (int64, error) {
return v, nil
case json.Number:
return strconv.ParseInt(string(v), 10, 64)
case nil:
return 0, nil
default:
return 0, fmt.Errorf("%T is not an int", v)
}
@ -73,6 +77,8 @@ func UnmarshalInt32(v any) (int32, error) {
return 0, err
}
return int32(iv), nil
case nil:
return 0, nil
default:
return 0, fmt.Errorf("%T is not an int", v)
}

View File

@ -62,7 +62,7 @@ func UnmarshalString(v any) (string, error) {
case bool:
return strconv.FormatBool(v), nil
case nil:
return "null", nil
return "", nil
default:
return "", fmt.Errorf("%T is not a string", v)
}

View File

@ -34,6 +34,8 @@ func UnmarshalUint(v any) (uint, error) {
case json.Number:
u64, err := strconv.ParseUint(string(v), 10, 64)
return uint(u64), err
case nil:
return 0, nil
default:
return 0, fmt.Errorf("%T is not an uint", v)
}
@ -63,6 +65,8 @@ func UnmarshalUint64(v any) (uint64, error) {
return uint64(v), nil
case json.Number:
return strconv.ParseUint(string(v), 10, 64)
case nil:
return 0, nil
default:
return 0, fmt.Errorf("%T is not an uint", v)
}
@ -100,6 +104,8 @@ func UnmarshalUint32(v any) (uint32, error) {
return 0, err
}
return uint32(iv), nil
case nil:
return 0, nil
default:
return 0, fmt.Errorf("%T is not an uint", v)
}

View File

@ -1,3 +1,3 @@
package graphql
const Version = "v0.17.49"
const Version = "v0.17.55"

View File

@ -11,6 +11,9 @@ exec:
# federation:
# filename: graph/federation.go
# package: graph
# version: 2
# options
# computed_requires: true
# Where should any generated models go?
model:
@ -63,6 +66,13 @@ resolver:
# Optional: set to skip running `go mod tidy` when generating server code
# skip_mod_tidy: true
# Optional: if this is set to true, argument directives that
# decorate a field with a null value will still be called.
#
# This enables argumment directives to not just mutate
# argument values but to set them even if they're null.
call_argument_directives_with_null: true
# gqlgen will search for any type names in the schema in these go packages
# if they match it will use them, otherwise it will generate them.
autobind:

View File

@ -0,0 +1,13 @@
//go:build !go1.23
package code
import (
"go/types"
)
// Unalias unwraps an alias type
// TODO: Drop this function when we drop support for go1.22
func Unalias(t types.Type) types.Type {
return t // No-op
}

View File

@ -0,0 +1,19 @@
//go:build go1.23
package code
import (
"go/types"
)
// Unalias unwraps an alias type
func Unalias(t types.Type) types.Type {
if p, ok := t.(*types.Pointer); ok {
// If the type come from auto-binding,
// it will be a pointer to an alias type.
// (e.g: `type Cursor = entgql.Cursor[int]`)
// *ent.Cursor is the type we got from auto-binding.
return types.NewPointer(Unalias(p.Elem()))
}
return types.Unalias(t)
}

View File

@ -8,6 +8,8 @@ import (
// CompatibleTypes isnt a strict comparison, it allows for pointer differences
func CompatibleTypes(expected, actual types.Type) error {
// Unwrap any aliases
expected, actual = Unalias(expected), Unalias(actual)
// Special case to deal with pointer mismatches
{
expectedPtr, expectedIsPtr := expected.(*types.Pointer)

View File

@ -0,0 +1,185 @@
package federation
import (
"github.com/99designs/gqlgen/codegen/config"
"github.com/vektah/gqlparser/v2/ast"
)
// The name of the field argument that is injected into the resolver to support @requires.
const fieldArgRequires = "_federationRequires"
// The name of the scalar type used in the injected field argument to support @requires.
const mapTypeName = "_RequiresMap"
// The @key directive that defines the key fields for an entity.
const dirNameKey = "key"
// The @requires directive that defines the required fields for an entity to be resolved.
const dirNameRequires = "requires"
// The @entityResolver directive allows users to specify entity resolvers as batch lookups
const dirNameEntityResolver = "entityResolver"
const dirNamePopulateFromRepresentations = "populateFromRepresentations"
var populateFromRepresentationsImplementation = `func(ctx context.Context, obj any, next graphql.Resolver) (res any, err error) {
fc := graphql.GetFieldContext(ctx)
// We get the Federation representations argument from the _entities resolver
representations, ok := fc.Parent.Parent.Args["representations"].([]map[string]any)
if !ok {
return nil, errors.New("must be called from within _entities")
}
// Get the index of the current entity in the representations list. This is
// set by the execution context after the _entities resolver is called.
index := fc.Parent.Index
if index == nil {
return nil, errors.New("couldn't find input index for entity")
}
if len(representations) < *index {
return nil, errors.New("representation not found")
}
return representations[*index], nil
}`
const DirNameEntityReference = "entityReference"
// The fields arguments must be provided to both key and requires directives.
const DirArgFields = "fields"
// Tells the code generator what type the directive is referencing
const DirArgType = "type"
// The file name for Federation directives
const dirGraphQLQFile = "federation/directives.graphql"
// The file name for Federation entities
const entityGraphQLQFile = "federation/entity.graphql"
const federationVersion1Schema = `
directive @key(fields: _FieldSet!) repeatable on OBJECT | INTERFACE
directive @requires(fields: _FieldSet!) on FIELD_DEFINITION
directive @provides(fields: _FieldSet!) on FIELD_DEFINITION
directive @extends on OBJECT | INTERFACE
directive @external on FIELD_DEFINITION
scalar _Any
scalar _FieldSet
`
const federationVersion2Schema = `
directive @authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM
directive @composeDirective(name: String!) repeatable on SCHEMA
directive @extends on OBJECT | INTERFACE
directive @external on OBJECT | FIELD_DEFINITION
directive @key(fields: FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE
directive @inaccessible on
| ARGUMENT_DEFINITION
| ENUM
| ENUM_VALUE
| FIELD_DEFINITION
| INPUT_FIELD_DEFINITION
| INPUT_OBJECT
| INTERFACE
| OBJECT
| SCALAR
| UNION
directive @interfaceObject on OBJECT
directive @link(import: [String!], url: String!) repeatable on SCHEMA
directive @override(from: String!, label: String) on FIELD_DEFINITION
directive @policy(policies: [[federation__Policy!]!]!) on
| FIELD_DEFINITION
| OBJECT
| INTERFACE
| SCALAR
| ENUM
directive @provides(fields: FieldSet!) on FIELD_DEFINITION
directive @requires(fields: FieldSet!) on FIELD_DEFINITION
directive @requiresScopes(scopes: [[federation__Scope!]!]!) on
| FIELD_DEFINITION
| OBJECT
| INTERFACE
| SCALAR
| ENUM
directive @shareable repeatable on FIELD_DEFINITION | OBJECT
directive @tag(name: String!) repeatable on
| ARGUMENT_DEFINITION
| ENUM
| ENUM_VALUE
| FIELD_DEFINITION
| INPUT_FIELD_DEFINITION
| INPUT_OBJECT
| INTERFACE
| OBJECT
| SCALAR
| UNION
scalar _Any
scalar FieldSet
scalar federation__Policy
scalar federation__Scope
`
var builtins = config.TypeMap{
"_Service": {
Model: config.StringList{
"github.com/99designs/gqlgen/plugin/federation/fedruntime.Service",
},
},
"_Entity": {
Model: config.StringList{
"github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity",
},
},
"Entity": {
Model: config.StringList{
"github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity",
},
},
"_Any": {
Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"},
},
"federation__Scope": {
Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"},
},
"federation__Policy": {
Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"},
},
}
var dirPopulateFromRepresentations = &ast.DirectiveDefinition{
Name: dirNamePopulateFromRepresentations,
IsRepeatable: false,
Description: `This is a runtime directive used to implement @requires. It's automatically placed
on the generated _federationRequires argument, and the implementation of it extracts the
correct value from the input representations list.`,
Locations: []ast.DirectiveLocation{ast.LocationArgumentDefinition},
Position: &ast.Position{Src: &ast.Source{
Name: dirGraphQLQFile,
}},
}
var dirEntityReference = &ast.DirectiveDefinition{
Name: DirNameEntityReference,
IsRepeatable: false,
Description: `This is a compile-time directive used to implement @requires.
It tells the code generator how to generate the model for the scalar.`,
Locations: []ast.DirectiveLocation{ast.LocationScalar},
Arguments: ast.ArgumentDefinitionList{
{
Name: DirArgType,
Type: ast.NonNullNamedType("String", nil),
Description: `The name of the entity that the fields selection
set should be validated against.`,
},
{
Name: DirArgFields,
Type: ast.NonNullNamedType("FieldSet", nil),
Description: "The selection that the scalar should generate into.",
},
},
Position: &ast.Position{Src: &ast.Source{
Name: dirGraphQLQFile,
}},
}

View File

@ -22,10 +22,12 @@ type Entity struct {
}
type EntityResolver struct {
ResolverName string // The resolver name, such as FindUserByID
KeyFields []*KeyField // The fields declared in @key.
InputType types.Type // The Go generated input type for multi entity resolvers
InputTypeName string
ResolverName string // The resolver name, such as FindUserByID
KeyFields []*KeyField // The fields declared in @key.
InputType types.Type // The Go generated input type for multi entity resolvers
InputTypeName string
ReturnType types.Type // The Go generated return type for the entity
ReturnTypeName string
}
func (e *EntityResolver) LookupInputType() string {
@ -60,7 +62,7 @@ func (e *Entity) isFieldImplicitlyExternal(field *ast.FieldDefinition, federatio
if federationVersion != 2 {
return false
}
// TODO: From the spec, it seems like if an entity is not resolvable then it should not only not have a resolver, but should not appear in the _Entitiy union.
// TODO: From the spec, it seems like if an entity is not resolvable then it should not only not have a resolver, but should not appear in the _Entity union.
// The current implementation is a less drastic departure from the previous behavior, but should probably be reviewed.
// See https://www.apollographql.com/docs/federation/subgraph-spec/
if e.isResolvable() {
@ -76,7 +78,7 @@ func (e *Entity) isFieldImplicitlyExternal(field *ast.FieldDefinition, federatio
// Determine if the entity is resolvable.
func (e *Entity) isResolvable() bool {
key := e.Def.Directives.ForName("key")
key := e.Def.Directives.ForName(dirNameKey)
if key == nil {
// If there is no key directive, the entity is resolvable.
return true
@ -102,11 +104,11 @@ func (e *Entity) isKeyField(field *ast.FieldDefinition) bool {
// Get the key fields for this entity.
func (e *Entity) keyFields() []string {
key := e.Def.Directives.ForName("key")
key := e.Def.Directives.ForName(dirNameKey)
if key == nil {
return []string{}
}
fields := key.Arguments.ForName("fields")
fields := key.Arguments.ForName(DirArgFields)
if fields == nil {
return []string{}
}

View File

@ -2,6 +2,7 @@ package federation
import (
_ "embed"
"errors"
"fmt"
"sort"
"strings"
@ -12,7 +13,6 @@ import (
"github.com/99designs/gqlgen/codegen/config"
"github.com/99designs/gqlgen/codegen/templates"
"github.com/99designs/gqlgen/internal/rewrite"
"github.com/99designs/gqlgen/plugin"
"github.com/99designs/gqlgen/plugin/federation/fieldset"
)
@ -22,56 +22,81 @@ var federationTemplate string
//go:embed requires.gotpl
var explicitRequiresTemplate string
type federation struct {
type Federation struct {
Entities []*Entity
Version int
PackageOptions map[string]bool
PackageOptions PackageOptions
version int
// true if @requires is used in the schema
usesRequires bool
}
type PackageOptions struct {
// ExplicitRequires will generate a function in the execution context
// to populate fields using the @required directive into the entity.
//
// You can only set one of ExplicitRequires or ComputedRequires to true.
ExplicitRequires bool
// ComputedRequires generates resolver functions to compute values for
// fields using the @required directive.
ComputedRequires bool
}
// New returns a federation plugin that injects
// federated directives and types into the schema
func New(version int) plugin.Plugin {
func New(version int, cfg *config.Config) (*Federation, error) {
if version == 0 {
version = 1
}
return &federation{Version: version}
options, err := buildPackageOptions(cfg)
if err != nil {
return nil, fmt.Errorf("invalid federation package options: %w", err)
}
return &Federation{
version: version,
PackageOptions: options,
}, nil
}
func buildPackageOptions(cfg *config.Config) (PackageOptions, error) {
packageOptions := cfg.Federation.Options
explicitRequires := packageOptions["explicit_requires"]
computedRequires := packageOptions["computed_requires"]
if explicitRequires && computedRequires {
return PackageOptions{}, errors.New("only one of explicit_requires or computed_requires can be set to true")
}
if computedRequires {
if cfg.Federation.Version != 2 {
return PackageOptions{}, errors.New("when using federation.options.computed_requires you must be using Federation 2")
}
// We rely on injecting a null argument with a directives for fields with @requires, so we need to ensure
// our directive is always called.
if !cfg.CallArgumentDirectivesWithNull {
return PackageOptions{}, errors.New("when using federation.options.computed_requires, call_argument_directives_with_null must be set to true")
}
}
// We rely on injecting a null argument with a directives for fields with @requires, so we need to ensure
// our directive is always called.
return PackageOptions{
ExplicitRequires: explicitRequires,
ComputedRequires: computedRequires,
}, nil
}
// Name returns the plugin name
func (f *federation) Name() string {
func (f *Federation) Name() string {
return "federation"
}
// MutateConfig mutates the configuration
func (f *federation) MutateConfig(cfg *config.Config) error {
builtins := config.TypeMap{
"_Service": {
Model: config.StringList{
"github.com/99designs/gqlgen/plugin/federation/fedruntime.Service",
},
},
"_Entity": {
Model: config.StringList{
"github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity",
},
},
"Entity": {
Model: config.StringList{
"github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity",
},
},
"_Any": {
Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"},
},
"federation__Scope": {
Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"},
},
"federation__Policy": {
Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"},
},
}
func (f *Federation) MutateConfig(cfg *config.Config) error {
for typeName, entry := range builtins {
if cfg.Models.Exists(typeName) {
return fmt.Errorf("%v already exists which must be reserved when Federation is enabled", typeName)
@ -79,13 +104,14 @@ func (f *federation) MutateConfig(cfg *config.Config) error {
cfg.Models[typeName] = entry
}
cfg.Directives["external"] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["requires"] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives[dirNameRequires] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["provides"] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["key"] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives[dirNameKey] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["extends"] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives[dirNameEntityResolver] = config.DirectiveConfig{SkipRuntime: true}
// Federation 2 specific directives
if f.Version == 2 {
if f.version == 2 {
cfg.Directives["shareable"] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["link"] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["tag"] = config.DirectiveConfig{SkipRuntime: true}
@ -98,95 +124,48 @@ func (f *federation) MutateConfig(cfg *config.Config) error {
cfg.Directives["composeDirective"] = config.DirectiveConfig{SkipRuntime: true}
}
if f.usesRequires && f.PackageOptions.ComputedRequires {
cfg.Schema.Directives[dirPopulateFromRepresentations.Name] = dirPopulateFromRepresentations
cfg.Directives[dirPopulateFromRepresentations.Name] = config.DirectiveConfig{Implementation: &populateFromRepresentationsImplementation}
cfg.Schema.Directives[dirEntityReference.Name] = dirEntityReference
cfg.Directives[dirEntityReference.Name] = config.DirectiveConfig{SkipRuntime: true}
f.addMapType(cfg)
f.mutateSchemaForRequires(cfg.Schema, cfg)
}
return nil
}
func (f *federation) InjectSourceEarly() *ast.Source {
func (f *Federation) InjectSourcesEarly() ([]*ast.Source, error) {
input := ``
// add version-specific changes on key directive, as well as adding the new directives for federation 2
if f.Version == 1 {
input += `
directive @key(fields: _FieldSet!) repeatable on OBJECT | INTERFACE
directive @requires(fields: _FieldSet!) on FIELD_DEFINITION
directive @provides(fields: _FieldSet!) on FIELD_DEFINITION
directive @extends on OBJECT | INTERFACE
directive @external on FIELD_DEFINITION
scalar _Any
scalar _FieldSet
`
} else if f.Version == 2 {
input += `
directive @authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM
directive @composeDirective(name: String!) repeatable on SCHEMA
directive @extends on OBJECT | INTERFACE
directive @external on OBJECT | FIELD_DEFINITION
directive @key(fields: FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE
directive @inaccessible on
| ARGUMENT_DEFINITION
| ENUM
| ENUM_VALUE
| FIELD_DEFINITION
| INPUT_FIELD_DEFINITION
| INPUT_OBJECT
| INTERFACE
| OBJECT
| SCALAR
| UNION
directive @interfaceObject on OBJECT
directive @link(import: [String!], url: String!) repeatable on SCHEMA
directive @override(from: String!, label: String) on FIELD_DEFINITION
directive @policy(policies: [[federation__Policy!]!]!) on
| FIELD_DEFINITION
| OBJECT
| INTERFACE
| SCALAR
| ENUM
directive @provides(fields: FieldSet!) on FIELD_DEFINITION
directive @requires(fields: FieldSet!) on FIELD_DEFINITION
directive @requiresScopes(scopes: [[federation__Scope!]!]!) on
| FIELD_DEFINITION
| OBJECT
| INTERFACE
| SCALAR
| ENUM
directive @shareable repeatable on FIELD_DEFINITION | OBJECT
directive @tag(name: String!) repeatable on
| ARGUMENT_DEFINITION
| ENUM
| ENUM_VALUE
| FIELD_DEFINITION
| INPUT_FIELD_DEFINITION
| INPUT_OBJECT
| INTERFACE
| OBJECT
| SCALAR
| UNION
scalar _Any
scalar FieldSet
scalar federation__Policy
scalar federation__Scope
`
if f.version == 1 {
input += federationVersion1Schema
} else if f.version == 2 {
input += federationVersion2Schema
}
return &ast.Source{
Name: "federation/directives.graphql",
return []*ast.Source{{
Name: dirGraphQLQFile,
Input: input,
BuiltIn: true,
}
}}, nil
}
// InjectSourceLate creates a GraphQL Entity type with all
// the fields that had the @key directive
func (f *federation) InjectSourceLate(schema *ast.Schema) *ast.Source {
f.setEntities(schema)
func (f *Federation) InjectSourcesLate(schema *ast.Schema) ([]*ast.Source, error) {
f.Entities = f.buildEntities(schema, f.version)
var entities, resolvers, entityResolverInputDefinitions string
entities := make([]string, 0)
resolvers := make([]string, 0)
entityResolverInputDefinitions := make([]string, 0)
for _, e := range f.Entities {
if e.Def.Kind != ast.Interface {
if entities != "" {
entities += " | "
}
entities += e.Name
entities = append(entities, e.Name)
} else if len(schema.GetPossibleTypes(e.Def)) == 0 {
fmt.Println(
"skipping @key field on interface " + e.Def.Name + " as no types implement it",
@ -194,48 +173,33 @@ func (f *federation) InjectSourceLate(schema *ast.Schema) *ast.Source {
}
for _, r := range e.Resolvers {
if e.Multi {
if entityResolverInputDefinitions != "" {
entityResolverInputDefinitions += "\n\n"
}
entityResolverInputDefinitions += "input " + r.InputTypeName + " {\n"
for _, keyField := range r.KeyFields {
entityResolverInputDefinitions += fmt.Sprintf(
"\t%s: %s\n",
keyField.Field.ToGo(),
keyField.Definition.Type.String(),
)
}
entityResolverInputDefinitions += "}"
resolvers += fmt.Sprintf("\t%s(reps: [%s]!): [%s]\n", r.ResolverName, r.InputTypeName, e.Name)
} else {
resolverArgs := ""
for _, keyField := range r.KeyFields {
resolverArgs += fmt.Sprintf("%s: %s,", keyField.Field.ToGoPrivate(), keyField.Definition.Type.String())
}
resolvers += fmt.Sprintf("\t%s(%s): %s!\n", r.ResolverName, resolverArgs, e.Name)
resolverSDL, entityResolverInputSDL := buildResolverSDL(r, e.Multi)
resolvers = append(resolvers, resolverSDL)
if entityResolverInputSDL != "" {
entityResolverInputDefinitions = append(entityResolverInputDefinitions, entityResolverInputSDL)
}
}
}
var blocks []string
if entities != "" {
entities = `# a union of all types that use the @key directive
union _Entity = ` + entities
blocks = append(blocks, entities)
if len(entities) > 0 {
entitiesSDL := `# a union of all types that use the @key directive
union _Entity = ` + strings.Join(entities, " | ")
blocks = append(blocks, entitiesSDL)
}
// resolvers can be empty if a service defines only "empty
// extend" types. This should be rare.
if resolvers != "" {
if entityResolverInputDefinitions != "" {
blocks = append(blocks, entityResolverInputDefinitions)
if len(resolvers) > 0 {
if len(entityResolverInputDefinitions) > 0 {
inputSDL := strings.Join(entityResolverInputDefinitions, "\n\n")
blocks = append(blocks, inputSDL)
}
resolvers = `# fake type to build resolver interfaces for users to implement
resolversSDL := `# fake type to build resolver interfaces for users to implement
type Entity {
` + resolvers + `
` + strings.Join(resolvers, "\n") + `
}`
blocks = append(blocks, resolvers)
blocks = append(blocks, resolversSDL)
}
_serviceTypeDef := `type _Service {
@ -259,14 +223,14 @@ type Entity {
}`
blocks = append(blocks, extendTypeQueryDef)
return &ast.Source{
Name: "federation/entity.graphql",
return []*ast.Source{{
Name: entityGraphQLQFile,
BuiltIn: true,
Input: "\n" + strings.Join(blocks, "\n\n") + "\n",
}
}}, nil
}
func (f *federation) GenerateCode(data *codegen.Data) error {
func (f *Federation) GenerateCode(data *codegen.Data) error {
// requires imports
requiresImports := make(map[string]bool, 0)
requiresImports["context"] = true
@ -275,7 +239,11 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
requiresEntities := make(map[string]*Entity, 0)
// Save package options on f for template use
f.PackageOptions = data.Config.Federation.Options
packageOptions, err := buildPackageOptions(data.Config)
if err != nil {
return fmt.Errorf("invalid federation package options: %w", err)
}
f.PackageOptions = packageOptions
if len(f.Entities) > 0 {
if data.Objects.ByName("Entity") != nil {
@ -295,18 +263,7 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
}
for _, r := range e.Resolvers {
// fill in types for key fields
//
for _, keyField := range r.KeyFields {
if len(keyField.Field) == 0 {
fmt.Println(
"skipping @key field " + keyField.Definition.Name + " in " + r.ResolverName + " in " + e.Def.Name,
)
continue
}
cgField := keyField.Field.TypeReference(obj, data.Objects)
keyField.Type = cgField.TypeReference
}
populateKeyFieldTypes(r, obj, data.Objects, e.Def.Name)
}
// fill in types for requires fields
@ -348,69 +305,12 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
}
}
if data.Config.Federation.Options["explicit_requires"] && len(requiresEntities) > 0 {
// check for existing requires functions
type Populator struct {
FuncName string
Exists bool
Comment string
Implementation string
Entity *Entity
}
populators := make([]Populator, 0)
rewriter, err := rewrite.New(data.Config.Federation.Dir())
if err != nil {
return err
}
for name, entity := range requiresEntities {
populator := Populator{
FuncName: fmt.Sprintf("Populate%sRequires", name),
Entity: entity,
}
populator.Comment = strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment("executionContext", populator.FuncName), `\`))
populator.Implementation = strings.TrimSpace(rewriter.GetMethodBody("executionContext", populator.FuncName))
if populator.Implementation == "" {
populator.Exists = false
populator.Implementation = fmt.Sprintf("panic(fmt.Errorf(\"not implemented: %v\"))", populator.FuncName)
}
populators = append(populators, populator)
}
sort.Slice(populators, func(i, j int) bool {
return populators[i].FuncName < populators[j].FuncName
})
requiresFile := data.Config.Federation.Dir() + "/federation.requires.go"
existingImports := rewriter.ExistingImports(requiresFile)
for _, imp := range existingImports {
if imp.Alias == "" {
// import exists in both places, remove
delete(requiresImports, imp.ImportPath)
}
}
for k := range requiresImports {
existingImports = append(existingImports, rewrite.Import{ImportPath: k})
}
// render requires populators
err = templates.Render(templates.Options{
PackageName: data.Config.Federation.Package,
Filename: requiresFile,
Data: struct {
federation
ExistingImports []rewrite.Import
Populators []Populator
OriginalSource string
}{*f, existingImports, populators, ""},
GeneratedHeader: false,
Packages: data.Config.Packages,
Template: explicitRequiresTemplate,
})
if f.PackageOptions.ExplicitRequires && len(requiresEntities) > 0 {
err := f.generateExplicitRequires(
data,
requiresEntities,
requiresImports,
)
if err != nil {
return err
}
@ -420,7 +320,7 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
PackageName: data.Config.Federation.Package,
Filename: data.Config.Federation.Filename,
Data: struct {
federation
Federation
UsePointers bool
}{*f, data.Config.ResolversAlwaysReturnPointers},
GeneratedHeader: true,
@ -429,137 +329,227 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
})
}
func (f *federation) setEntities(schema *ast.Schema) {
// populateKeyFieldTypes fills in the codegen type reference for every key
// field of the given entity resolver by resolving it against the generated
// objects. Key fields with an empty fieldset entry cannot be bound to a Go
// type and are skipped with a printed notice. name is the entity definition
// name and is used only in that notice.
func populateKeyFieldTypes(
	resolver *EntityResolver,
	obj *codegen.Object,
	allObjects codegen.Objects,
	name string,
) {
	for _, keyField := range resolver.KeyFields {
		if len(keyField.Field) == 0 {
			// No concrete field path for this @key component; report and move on.
			fmt.Println(
				"skipping @key field " + keyField.Definition.Name + " in " + resolver.ResolverName + " in " + name,
			)
			continue
		}
		cgField := keyField.Field.TypeReference(obj, allObjects)
		keyField.Type = cgField.TypeReference
	}
}
func (f *Federation) buildEntities(schema *ast.Schema, version int) []*Entity {
entities := make([]*Entity, 0)
for _, schemaType := range schema.Types {
keys, ok := isFederatedEntity(schemaType)
if !ok {
continue
entity := f.buildEntity(schemaType, schema, version)
if entity != nil {
entities = append(entities, entity)
}
if (schemaType.Kind == ast.Interface) && (len(schema.GetPossibleTypes(schemaType)) == 0) {
fmt.Printf("@key directive found on unused \"interface %s\". Will be ignored.\n", schemaType.Name)
continue
}
e := &Entity{
Name: schemaType.Name,
Def: schemaType,
Resolvers: nil,
Requires: nil,
}
// Let's process custom entity resolver settings.
dir := schemaType.Directives.ForName("entityResolver")
if dir != nil {
if dirArg := dir.Arguments.ForName("multi"); dirArg != nil {
if dirVal, err := dirArg.Value.Value(nil); err == nil {
e.Multi = dirVal.(bool)
}
}
}
// If our schema has a field with a type defined in
// another service, then we need to define an "empty
// extend" of that type in this service, so this service
// knows what the type is like. But the graphql-server
// will never ask us to actually resolve this "empty
// extend", so we don't require a resolver function for
// it. (Well, it will never ask in practice; it's
// unclear whether the spec guarantees this. See
// https://github.com/apollographql/apollo-server/issues/3852
// ). Example:
// type MyType {
// myvar: TypeDefinedInOtherService
// }
// // Federation needs this type, but
// // it doesn't need a resolver for it!
// extend TypeDefinedInOtherService @key(fields: "id") {
// id: ID @external
// }
if !e.allFieldsAreExternal(f.Version) {
for _, dir := range keys {
if len(dir.Arguments) > 2 {
panic("More than two arguments provided for @key declaration.")
}
var arg *ast.Argument
// since keys are able to now have multiple arguments, we need to check both possible for a possible @key(fields="" fields="")
for _, a := range dir.Arguments {
if a.Name == "fields" {
if arg != nil {
panic("More than one `fields` provided for @key declaration.")
}
arg = a
}
}
keyFieldSet := fieldset.New(arg.Value.Raw, nil)
keyFields := make([]*KeyField, len(keyFieldSet))
resolverFields := []string{}
for i, field := range keyFieldSet {
def := field.FieldDefinition(schemaType, schema)
if def == nil {
panic(fmt.Sprintf("no field for %v", field))
}
keyFields[i] = &KeyField{Definition: def, Field: field}
resolverFields = append(resolverFields, keyFields[i].Field.ToGo())
}
resolverFieldsToGo := schemaType.Name + "By" + strings.Join(resolverFields, "And")
var resolverName string
if e.Multi {
resolverFieldsToGo += "s" // Pluralize for better API readability
resolverName = fmt.Sprintf("findMany%s", resolverFieldsToGo)
} else {
resolverName = fmt.Sprintf("find%s", resolverFieldsToGo)
}
e.Resolvers = append(e.Resolvers, &EntityResolver{
ResolverName: resolverName,
KeyFields: keyFields,
InputTypeName: resolverFieldsToGo + "Input",
})
}
e.Requires = []*Requires{}
for _, f := range schemaType.Fields {
dir := f.Directives.ForName("requires")
if dir == nil {
continue
}
if len(dir.Arguments) != 1 || dir.Arguments[0].Name != "fields" {
panic("Exactly one `fields` argument needed for @requires declaration.")
}
requiresFieldSet := fieldset.New(dir.Arguments[0].Value.Raw, nil)
for _, field := range requiresFieldSet {
e.Requires = append(e.Requires, &Requires{
Name: field.ToGoPrivate(),
Field: field,
})
}
}
}
f.Entities = append(f.Entities, e)
}
// make sure order remains stable across multiple builds
sort.Slice(f.Entities, func(i, j int) bool {
return f.Entities[i].Name < f.Entities[j].Name
sort.Slice(entities, func(i, j int) bool {
return entities[i].Name < entities[j].Name
})
return entities
}
// buildEntity builds the Entity model for a single schema type. It returns
// nil when the type is not a federated entity (carries no @key directive) or
// is an interface with no possible types. Entities whose fields are all
// @external get no resolvers and no requires: they are "empty extends" owned
// by another service (see the long comment below).
func (f *Federation) buildEntity(
	schemaType *ast.Definition,
	schema *ast.Schema,
	version int,
) *Entity {
	keys, ok := isFederatedEntity(schemaType)
	if !ok {
		return nil
	}
	if (schemaType.Kind == ast.Interface) && (len(schema.GetPossibleTypes(schemaType)) == 0) {
		fmt.Printf("@key directive found on unused \"interface %s\". Will be ignored.\n", schemaType.Name)
		return nil
	}
	entity := &Entity{
		Name:      schemaType.Name,
		Def:       schemaType,
		Resolvers: nil,
		Requires:  nil,
		Multi:     isMultiEntity(schemaType),
	}
	// If our schema has a field with a type defined in
	// another service, then we need to define an "empty
	// extend" of that type in this service, so this service
	// knows what the type is like. But the graphql-server
	// will never ask us to actually resolve this "empty
	// extend", so we don't require a resolver function for
	// it. (Well, it will never ask in practice; it's
	// unclear whether the spec guarantees this. See
	// https://github.com/apollographql/apollo-server/issues/3852
	// ). Example:
	//    type MyType {
	//       myvar: TypeDefinedInOtherService
	//    }
	//    // Federation needs this type, but
	//    // it doesn't need a resolver for it!
	//    extend TypeDefinedInOtherService @key(fields: "id") {
	//       id: ID @external
	//    }
	if entity.allFieldsAreExternal(version) {
		return entity
	}
	entity.Resolvers = buildResolvers(schemaType, schema, keys, entity.Multi)
	entity.Requires = buildRequires(schemaType)
	if len(entity.Requires) > 0 {
		// Remember that at least one entity uses @requires so the plugin can
		// emit the supporting code once.
		f.usesRequires = true
	}
	return entity
}
// isMultiEntity reports whether the schema type opts into multi (batched)
// entity resolution via @entityResolver(multi: true).
func isMultiEntity(schemaType *ast.Definition) bool {
	resolverDir := schemaType.Directives.ForName(dirNameEntityResolver)
	if resolverDir == nil {
		return false
	}
	multiArg := resolverDir.Arguments.ForName("multi")
	if multiArg == nil {
		return false
	}
	val, err := multiArg.Value.Value(nil)
	if err != nil {
		// Unreadable argument value is treated the same as an absent one.
		return false
	}
	return val.(bool)
}
// buildResolvers constructs one EntityResolver per @key directive on the
// entity type. The resolver name is derived from the type name and the
// Go-cased key fields ("findFooByIDAndName"); multi-mode resolvers are
// pluralized and prefixed "findMany". Panics on a @key declaration with
// more than two arguments.
func buildResolvers(
	schemaType *ast.Definition,
	schema *ast.Schema,
	keys []*ast.Directive,
	multi bool,
) []*EntityResolver {
	resolvers := make([]*EntityResolver, 0)
	for _, dir := range keys {
		if len(dir.Arguments) > 2 {
			panic("More than two arguments provided for @key declaration.")
		}
		keyFields, resolverFields := buildKeyFields(
			schemaType,
			schema,
			dir,
		)
		resolverFieldsToGo := schemaType.Name + "By" + strings.Join(resolverFields, "And")
		var resolverName string
		if multi {
			resolverFieldsToGo += "s" // Pluralize for better API readability
			resolverName = fmt.Sprintf("findMany%s", resolverFieldsToGo)
		} else {
			resolverName = fmt.Sprintf("find%s", resolverFieldsToGo)
		}
		resolvers = append(resolvers, &EntityResolver{
			ResolverName:   resolverName,
			KeyFields:      keyFields,
			InputTypeName:  resolverFieldsToGo + "Input",
			ReturnTypeName: schemaType.Name,
		})
	}
	return resolvers
}
// extractFields returns the raw value of the single "fields" argument of a
// directive such as @key or @requires. It returns an error when the argument
// is supplied more than once, or not at all (previously a directive without
// a "fields" argument caused a nil-pointer panic on arg.Value.Raw).
func extractFields(
	dir *ast.Directive,
) (string, error) {
	var arg *ast.Argument
	// since directives are able to now have multiple arguments, we need to check both possible for a possible @key(fields="" fields="")
	for _, a := range dir.Arguments {
		if a.Name == DirArgFields {
			if arg != nil {
				return "", errors.New("more than one \"fields\" argument provided for declaration")
			}
			arg = a
		}
	}
	if arg == nil {
		// Guard against a missing "fields" argument; without this the
		// dereference below would panic with a nil pointer.
		return "", errors.New("no \"fields\" argument provided for declaration")
	}
	return arg.Value.Raw, nil
}
// buildKeyFields resolves the fieldset of one @key directive against the
// entity's schema type. It returns the typed key fields plus the Go-cased
// field names used to build resolver and input-type names. Panics when the
// directive's fields argument is malformed or references an unknown field.
func buildKeyFields(
	schemaType *ast.Definition,
	schema *ast.Schema,
	dir *ast.Directive,
) ([]*KeyField, []string) {
	fieldsRaw, err := extractFields(dir)
	if err != nil {
		// NOTE(review): this message only describes the duplicate-argument
		// failure, but extractFields may fail for other reasons — consider
		// panicking with err itself.
		panic("More than one `fields` argument provided for declaration.")
	}
	keyFieldSet := fieldset.New(fieldsRaw, nil)
	keyFields := make([]*KeyField, len(keyFieldSet))
	resolverFields := []string{}
	for i, field := range keyFieldSet {
		def := field.FieldDefinition(schemaType, schema)
		if def == nil {
			panic(fmt.Sprintf("no field for %v", field))
		}
		keyFields[i] = &KeyField{Definition: def, Field: field}
		resolverFields = append(resolverFields, keyFields[i].Field.ToGo())
	}
	return keyFields, resolverFields
}
// buildRequires collects one Requires entry per field referenced by a
// @requires directive on any field of the entity type. Fields without the
// directive are skipped; a malformed fields argument panics.
func buildRequires(schemaType *ast.Definition) []*Requires {
	requires := make([]*Requires, 0)
	for _, f := range schemaType.Fields {
		dir := f.Directives.ForName(dirNameRequires)
		if dir == nil {
			continue
		}
		fieldsRaw, err := extractFields(dir)
		if err != nil {
			panic("Exactly one `fields` argument needed for @requires declaration.")
		}
		requiresFieldSet := fieldset.New(fieldsRaw, nil)
		for _, field := range requiresFieldSet {
			requires = append(requires, &Requires{
				Name:  field.ToGoPrivate(),
				Field: field,
			})
		}
	}
	return requires
}
func isFederatedEntity(schemaType *ast.Definition) ([]*ast.Directive, bool) {
switch schemaType.Kind {
case ast.Object:
keys := schemaType.Directives.ForNames("key")
keys := schemaType.Directives.ForNames(dirNameKey)
if len(keys) > 0 {
return keys, true
}
case ast.Interface:
keys := schemaType.Directives.ForNames("key")
keys := schemaType.Directives.ForNames(dirNameKey)
if len(keys) > 0 {
return keys, true
}
@ -577,3 +567,146 @@ func isFederatedEntity(schemaType *ast.Definition) ([]*ast.Directive, bool) {
}
return nil, false
}
// generateExplicitRequires renders federation.requires.go containing one
// Populate<Entity>Requires method per entity that uses @requires. Existing
// user-written method bodies and comments are preserved through the
// rewriter; entities without a body get a not-implemented panic stub.
func (f *Federation) generateExplicitRequires(
	data *codegen.Data,
	requiresEntities map[string]*Entity,
	requiresImports map[string]bool,
) error {
	// check for existing requires functions
	type Populator struct {
		FuncName       string
		Exists         bool
		Comment        string
		Implementation string
		Entity         *Entity
	}
	populators := make([]Populator, 0)
	rewriter, err := rewrite.New(data.Config.Federation.Dir())
	if err != nil {
		return err
	}
	for name, entity := range requiresEntities {
		populator := Populator{
			FuncName: fmt.Sprintf("Populate%sRequires", name),
			Entity:   entity,
		}
		// Carry over any comment and body the user already wrote for this method.
		populator.Comment = strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment("executionContext", populator.FuncName), `\`))
		populator.Implementation = strings.TrimSpace(rewriter.GetMethodBody("executionContext", populator.FuncName))
		if populator.Implementation == "" {
			// NOTE(review): Exists is already false here and is never set to
			// true when an existing body is found — confirm this is intended.
			populator.Exists = false
			populator.Implementation = fmt.Sprintf("panic(fmt.Errorf(\"not implemented: %v\"))", populator.FuncName)
		}
		populators = append(populators, populator)
	}
	// Sort so the generated output is deterministic across builds.
	sort.Slice(populators, func(i, j int) bool {
		return populators[i].FuncName < populators[j].FuncName
	})
	requiresFile := data.Config.Federation.Dir() + "/federation.requires.go"
	existingImports := rewriter.ExistingImports(requiresFile)
	for _, imp := range existingImports {
		if imp.Alias == "" {
			// import exists in both places, remove
			delete(requiresImports, imp.ImportPath)
		}
	}
	for k := range requiresImports {
		existingImports = append(existingImports, rewrite.Import{ImportPath: k})
	}
	// render requires populators
	return templates.Render(templates.Options{
		PackageName: data.Config.Federation.Package,
		Filename:    requiresFile,
		Data: struct {
			Federation
			ExistingImports []rewrite.Import
			Populators      []Populator
			OriginalSource  string
		}{*f, existingImports, populators, ""},
		GeneratedHeader: false,
		Packages:        data.Config.Packages,
		Template:        explicitRequiresTemplate,
	})
}
// buildResolverSDL renders the SDL for one entity resolver field on the
// Entity query extension. For multi-mode resolvers it also returns the SDL
// of the generated input type definition; otherwise entityResolverInputSDL
// is empty.
func buildResolverSDL(
	resolver *EntityResolver,
	multi bool,
) (resolverSDL, entityResolverInputSDL string) {
	if multi {
		// Multi resolvers take a list of the generated input objects.
		// (Assign to the named return instead of shadowing it with := as the
		// previous version did; behavior is unchanged, but the shadowing was
		// a staticcheck/vet smell.)
		entityResolverInputSDL = buildEntityResolverInputDefinitionSDL(resolver)
		resolverSDL = fmt.Sprintf("\t%s(reps: [%s]!): [%s]", resolver.ResolverName, resolver.InputTypeName, resolver.ReturnTypeName)
		return resolverSDL, entityResolverInputSDL
	}
	// Single resolvers take one argument per @key field.
	resolverArgs := ""
	for _, keyField := range resolver.KeyFields {
		resolverArgs += fmt.Sprintf("%s: %s,", keyField.Field.ToGoPrivate(), keyField.Definition.Type.String())
	}
	resolverSDL = fmt.Sprintf("\t%s(%s): %s!", resolver.ResolverName, resolverArgs, resolver.ReturnTypeName)
	return resolverSDL, ""
}
// buildEntityResolverInputDefinitionSDL renders the SDL input type used by a
// multi entity resolver: one input field per @key component of the resolver.
func buildEntityResolverInputDefinitionSDL(resolver *EntityResolver) string {
	var sdl strings.Builder
	sdl.WriteString("input " + resolver.InputTypeName + " {\n")
	for _, keyField := range resolver.KeyFields {
		fmt.Fprintf(
			&sdl,
			"\t%s: %s\n",
			keyField.Field.ToGo(),
			keyField.Definition.Type.String(),
		)
	}
	sdl.WriteString("}")
	return sdl.String()
}
// addMapType registers the federation map scalar in both the gqlgen model
// config and the schema so that @requires arguments can be typed as a map.
func (f *Federation) addMapType(cfg *config.Config) {
	cfg.Models[mapTypeName] = config.TypeMapEntry{
		Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"},
	}
	cfg.Schema.Types[mapTypeName] = &ast.Definition{
		Kind:        ast.Scalar,
		Name:        mapTypeName,
		Description: "Maps an arbitrary GraphQL value to a map[string]any Go type.",
	}
}
// mutateSchemaForRequires rewrites the schema and model config for fields
// carrying @requires: each such field is forced to have a resolver, and a
// synthetic map-typed argument is appended whose value is populated from the
// entity representations via the populate-from-representations directive.
func (f *Federation) mutateSchemaForRequires(
	schema *ast.Schema,
	cfg *config.Config,
) {
	for _, schemaType := range schema.Types {
		for _, field := range schemaType.Fields {
			if dir := field.Directives.ForName(dirNameRequires); dir != nil {
				// ensure we always generate a resolver for any @requires field
				model := cfg.Models[schemaType.Name]
				// Reading from a possibly-nil Fields map is safe in Go: it
				// yields the zero-value field config, which we then mark.
				fieldConfig := model.Fields[field.Name]
				fieldConfig.Resolver = true
				if model.Fields == nil {
					model.Fields = make(map[string]config.TypeMapField)
				}
				model.Fields[field.Name] = fieldConfig
				cfg.Models[schemaType.Name] = model
				// Append the hidden argument that carries representation data.
				requiresArgument := &ast.ArgumentDefinition{
					Name: fieldArgRequires,
					Type: ast.NamedType(mapTypeName, nil),
					Directives: ast.DirectiveList{
						{
							Name:       dirNamePopulateFromRepresentations,
							Definition: dirPopulateFromRepresentations,
						},
					},
				}
				field.Arguments = append(field.Arguments, requiresArgument)
			}
		}
	}
}

View File

@ -36,15 +36,50 @@ func (ec *executionContext) __resolve__service(ctx context.Context) (fedruntime.
func (ec *executionContext) __resolve_entities(ctx context.Context, representations []map[string]interface{}) []fedruntime.Entity {
list := make([]fedruntime.Entity, len(representations))
repsMap := map[string]struct {
i []int
r []map[string]interface{}
}{}
repsMap := ec.buildRepresentationGroups(ctx, representations)
switch len(repsMap) {
case 0:
return list
case 1:
for typeName, reps := range repsMap {
ec.resolveEntityGroup(ctx, typeName, reps, list)
}
return list
default:
var g sync.WaitGroup
g.Add(len(repsMap))
for typeName, reps := range repsMap {
go func(typeName string, reps []EntityWithIndex) {
ec.resolveEntityGroup(ctx, typeName, reps, list)
g.Done()
}(typeName, reps)
}
g.Wait()
return list
}
}
type EntityWithIndex struct {
// The index in the original representation array
index int
entity EntityRepresentation
}
// EntityRepresentation is the JSON representation of an entity sent by the Router
// used as the inputs for us to resolve.
//
// We make it a map because we know the top level JSON is always an object.
type EntityRepresentation map[string]any
// We group entities by typename so that we can parallelize their resolution.
// This is particularly helpful when there are entity groups in multi mode.
buildRepresentationGroups := func(reps []map[string]interface{}) {
for i, rep := range reps {
func (ec *executionContext) buildRepresentationGroups(
ctx context.Context,
representations []map[string]any,
) map[string][]EntityWithIndex {
repsMap := make(map[string][]EntityWithIndex)
for i, rep := range representations {
typeName, ok := rep["__typename"].(string)
if !ok {
// If there is no __typename, we just skip the representation;
@ -53,14 +88,48 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
continue
}
_r := repsMap[typeName]
_r.i = append(_r.i, i)
_r.r = append(_r.r, rep)
repsMap[typeName] = _r
}
repsMap[typeName] = append(repsMap[typeName], EntityWithIndex{
index: i,
entity: rep,
})
}
isMulti := func(typeName string) bool {
return repsMap
}
func (ec *executionContext) resolveEntityGroup(
ctx context.Context,
typeName string,
reps []EntityWithIndex,
list []fedruntime.Entity,
) {
if isMulti(typeName) {
err := ec.resolveManyEntities(ctx, typeName, reps, list)
if err != nil {
ec.Error(ctx, err)
}
} else {
// if there are multiple entities to resolve, parallelize (similar to
// graphql.FieldSet.Dispatch)
var e sync.WaitGroup
e.Add(len(reps))
for i, rep := range reps {
i, rep := i, rep
go func(i int, rep EntityWithIndex) {
entity, err := ec.resolveEntity(ctx, typeName, rep.entity)
if err != nil {
ec.Error(ctx, err)
} else {
list[rep.index] = entity
}
e.Done()
}(i, rep)
}
e.Wait()
}
}
func isMulti(typeName string) bool {
switch typeName {
{{- range .Entities -}}
{{- if .Resolvers -}}
@ -75,7 +144,11 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
}
}
resolveEntity := func(ctx context.Context, typeName string, rep map[string]interface{}, idx []int, i int) (err error) {
func (ec *executionContext) resolveEntity(
ctx context.Context,
typeName string,
rep EntityRepresentation,
) (e fedruntime.Entity, err error) {
// we need to do our own panic handling, because we may be called in a
// goroutine, where the usual panic handling can't catch us
defer func () {
@ -90,45 +163,51 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
case "{{.Def.Name}}":
resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, rep)
if err != nil {
return fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err)
return nil, fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err)
}
switch resolverName {
{{ range $i, $resolver := .Resolvers }}
case "{{.ResolverName}}":
{{- range $j, $keyField := .KeyFields }}
id{{$j}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"])
id{{$j}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"])
if err != nil {
return fmt.Errorf(`unmarshalling param {{$j}} for {{$resolver.ResolverName}}(): %w`, err)
return nil, fmt.Errorf(`unmarshalling param {{$j}} for {{$resolver.ResolverName}}(): %w`, err)
}
{{- end}}
entity, err := ec.resolvers.Entity().{{.ResolverName | go}}(ctx, {{- range $j, $_ := .KeyFields -}} id{{$j}}, {{end}})
if err != nil {
return fmt.Errorf(`resolving Entity "{{$entity.Def.Name}}": %w`, err)
return nil, fmt.Errorf(`resolving Entity "{{$entity.Def.Name}}": %w`, err)
}
{{ if and (index $options "explicit_requires") $entity.Requires }}
{{- if $options.ComputedRequires }}
{{/* We don't do anything in this case, computed requires are handled by standard resolvers */}}
{{- else if and $options.ExplicitRequires $entity.Requires }}
err = ec.Populate{{$entity.Def.Name}}Requires(ctx, {{- if (not $usePointers) -}}&{{- end -}}entity, rep)
if err != nil {
return fmt.Errorf(`populating requires for Entity "{{$entity.Def.Name}}": %w`, err)
return nil, fmt.Errorf(`populating requires for Entity "{{$entity.Def.Name}}": %w`, err)
}
{{- else }}
{{ range $entity.Requires }}
entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"])
if err != nil {
return err
return nil, err
}
{{- end }}
{{- end }}
list[idx[i]] = entity
return nil
return entity, nil
{{- end }}
}
{{ end }}
{{- end }}
}
return fmt.Errorf("%w: %s", ErrUnknownType, typeName)
return nil, fmt.Errorf("%w: %s", ErrUnknownType, typeName)
}
resolveManyEntities := func(ctx context.Context, typeName string, reps []map[string]interface{}, idx []int) (err error) {
func (ec *executionContext) resolveManyEntities(
ctx context.Context,
typeName string,
reps []EntityWithIndex,
list []fedruntime.Entity,
) (err error) {
// we need to do our own panic handling, because we may be called in a
// goroutine, where the usual panic handling can't catch us
defer func () {
@ -141,43 +220,43 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
{{ range $_, $entity := .Entities }}
{{ if and .Resolvers .Multi -}}
case "{{.Def.Name}}":
resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, reps[0])
resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, reps[0].entity)
if err != nil {
return fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err)
}
switch resolverName {
{{ range $i, $resolver := .Resolvers }}
case "{{.ResolverName}}":
_reps := make([]*{{.LookupInputType}}, len(reps))
typedReps := make([]*{{.LookupInputType}}, len(reps))
for i, rep := range reps {
{{ range $i, $keyField := .KeyFields -}}
id{{$i}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"])
id{{$i}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep.entity["{{.Field.Join `"].(map[string]interface{})["`}}"])
if err != nil {
return errors.New(fmt.Sprintf("Field %s undefined in schema.", "{{.Definition.Name}}"))
}
{{end}}
_reps[i] = &{{.LookupInputType}} {
typedReps[i] = &{{.LookupInputType}} {
{{ range $i, $keyField := .KeyFields -}}
{{$keyField.Field.ToGo}}: id{{$i}},
{{end}}
}
}
entities, err := ec.resolvers.Entity().{{.ResolverName | go}}(ctx, _reps)
entities, err := ec.resolvers.Entity().{{.ResolverName | go}}(ctx, typedReps)
if err != nil {
return err
}
for i, entity := range entities {
{{- range $entity.Requires }}
entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, reps[i]["{{.Field.Join `"].(map[string]interface{})["`}}"])
entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, reps[i].entity["{{.Field.Join `"].(map[string]interface{})["`}}"])
if err != nil {
return err
}
{{- end}}
list[idx[i]] = entity
list[reps[i].index] = entity
}
return nil
{{ end }}
@ -188,54 +267,6 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
{{- end }}
default:
return errors.New("unknown type: "+typeName)
}
}
resolveEntityGroup := func(typeName string, reps []map[string]interface{}, idx []int) {
if isMulti(typeName) {
err := resolveManyEntities(ctx, typeName, reps, idx)
if err != nil {
ec.Error(ctx, err)
}
} else {
// if there are multiple entities to resolve, parallelize (similar to
// graphql.FieldSet.Dispatch)
var e sync.WaitGroup
e.Add(len(reps))
for i, rep := range reps {
i, rep := i, rep
go func(i int, rep map[string]interface{}) {
err := resolveEntity(ctx, typeName, rep, idx, i)
if err != nil {
ec.Error(ctx, err)
}
e.Done()
}(i, rep)
}
e.Wait()
}
}
buildRepresentationGroups(representations)
switch len(repsMap) {
case 0:
return list
case 1:
for typeName, reps := range repsMap {
resolveEntityGroup(typeName, reps.r, reps.i)
}
return list
default:
var g sync.WaitGroup
g.Add(len(repsMap))
for typeName, reps := range repsMap {
go func(typeName string, reps []map[string]interface{}, idx []int) {
resolveEntityGroup(typeName, reps, idx)
g.Done()
}(typeName, reps.r, reps.i)
}
g.Wait()
return list
}
}
@ -244,13 +275,13 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
{{ range $_, $entity := .Entities }}
{{- if .Resolvers }}
func entityResolverNameFor{{$entity.Name}}(ctx context.Context, rep map[string]interface{}) (string, error) {
func entityResolverNameFor{{$entity.Name}}(ctx context.Context, rep EntityRepresentation) (string, error) {
{{- range .Resolvers }}
for {
var (
m map[string]interface{}
m EntityRepresentation
val interface{}
ok bool
ok bool
)
_ = val
// if all of the KeyFields values for this resolver are null,

View File

@ -18,7 +18,7 @@ TODO(miguel): add details.
# Entity resolvers - GetMany entities
The federation plugin implements `GetMany` semantics in which entity resolvers get the entire list of representations that need to be resolved. This functionality is currently optin tho, and to enable it you need to specify the directive `@entityResolver` in the federated entity you want this feature for. E.g.
The federation plugin implements `GetMany` semantics in which entity resolvers get the entire list of representations that need to be resolved. This functionality is currently option tho, and to enable it you need to specify the directive `@entityResolver` in the federated entity you want this feature for. E.g.
```
directive @entityResolver(multi: Boolean) on OBJECT
@ -39,4 +39,4 @@ func (r *entityResolver) FindManyMultiHellosByName(ctx context.Context, reps []*
```
**Note:**
If you are using `omit_slice_element_pointers: true` option in your config yaml, your `GetMany` resolver will still generate in the example above the same signature `FindManyMultiHellosByName(ctx context.Context, reps []*generated.ManyMultiHellosByNameInput) ([]*generated.MultiHello, error)`. But all other instances will continue to honor `omit_slice_element_pointers: true`
If you are using `omit_slice_element_pointers: true` option in your config yaml, your `GetMany` resolver will still generate in the example above the same signature `FindManyMultiHellosByName(ctx context.Context, reps []*generated.ManyMultiHellosByNameInput) ([]*generated.MultiHello, error)`. But all other instances will continue to honor `omit_slice_element_pointers: true`

View File

@ -26,11 +26,6 @@ type (
// DefaultFieldMutateHook is the default hook for the Plugin which applies the GoFieldHook and GoTagFieldHook.
// The field returned by GoFieldHook is passed on to GoTagFieldHook; the
// first error aborts the chain.
func DefaultFieldMutateHook(td *ast.Definition, fd *ast.FieldDefinition, f *Field) (*Field, error) {
	var err error
	f, err = GoFieldHook(td, fd, f)
	if err != nil {
		return f, err
	}
	return GoTagFieldHook(td, fd, f)
}
@ -337,111 +332,16 @@ func (m *Plugin) generateFields(cfg *config.Config, schemaType *ast.Definition)
binder := cfg.NewBinder()
fields := make([]*Field, 0)
var omittableType types.Type
for _, field := range schemaType.Fields {
var typ types.Type
fieldDef := cfg.Schema.Types[field.Type.Name()]
if cfg.Models.UserDefined(field.Type.Name()) {
var err error
typ, err = binder.FindTypeFromName(cfg.Models[field.Type.Name()].Model[0])
if err != nil {
return nil, err
}
} else {
switch fieldDef.Kind {
case ast.Scalar:
// no user defined model, referencing a default scalar
typ = types.NewNamed(
types.NewTypeName(0, cfg.Model.Pkg(), "string", nil),
nil,
nil,
)
case ast.Interface, ast.Union:
// no user defined model, referencing a generated interface type
typ = types.NewNamed(
types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
types.NewInterfaceType([]*types.Func{}, []types.Type{}),
nil,
)
case ast.Enum:
// no user defined model, must reference a generated enum
typ = types.NewNamed(
types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
nil,
nil,
)
case ast.Object, ast.InputObject:
// no user defined model, must reference a generated struct
typ = types.NewNamed(
types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
types.NewStruct(nil, nil),
nil,
)
default:
panic(fmt.Errorf("unknown ast type %s", fieldDef.Kind))
}
f, err := m.generateField(cfg, binder, schemaType, field)
if err != nil {
return nil, err
}
name := templates.ToGo(field.Name)
if nameOveride := cfg.Models[schemaType.Name].Fields[field.Name].FieldName; nameOveride != "" {
name = nameOveride
}
typ = binder.CopyModifiersFromAst(field.Type, typ)
if cfg.StructFieldsAlwaysPointers {
if isStruct(typ) && (fieldDef.Kind == ast.Object || fieldDef.Kind == ast.InputObject) {
typ = types.NewPointer(typ)
}
}
f := &Field{
Name: field.Name,
GoName: name,
Type: typ,
Description: field.Description,
Tag: getStructTagFromField(cfg, field),
Omittable: cfg.NullableInputOmittable && schemaType.Kind == ast.InputObject && !field.Type.NonNull,
}
if m.FieldHook != nil {
mf, err := m.FieldHook(schemaType, field, f)
if err != nil {
return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err)
}
f = mf
}
if f.IsResolver && cfg.OmitResolverFields {
if f == nil {
continue
}
if f.Omittable {
if schemaType.Kind != ast.InputObject || field.Type.NonNull {
return nil, fmt.Errorf("generror: field %v.%v: omittable is only applicable to nullable input fields", schemaType.Name, field.Name)
}
var err error
if omittableType == nil {
omittableType, err = binder.FindTypeFromName("github.com/99designs/gqlgen/graphql.Omittable")
if err != nil {
return nil, err
}
}
f.Type, err = binder.InstantiateType(omittableType, []types.Type{f.Type})
if err != nil {
return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err)
}
}
fields = append(fields, f)
}
@ -450,6 +350,123 @@ func (m *Plugin) generateFields(cfg *config.Config, schemaType *ast.Definition)
return fields, nil
}
// generateField builds the model Field for a single schema field: it binds
// the Go type (user-defined model or a generated placeholder type), applies
// the configured name override, pointer rules, the per-field omittable
// setting, and the plugin's FieldHook. It returns (nil, nil) when the field
// is a resolver field and OmitResolverFields is set, signalling the caller
// to skip it.
func (m *Plugin) generateField(
	cfg *config.Config,
	binder *config.Binder,
	schemaType *ast.Definition,
	field *ast.FieldDefinition,
) (*Field, error) {
	// NOTE(review): omittableType is looked up per call here, whereas the
	// previous loop-based version cached it across all fields of a type —
	// harmless repeated work; consider hoisting if it shows up in profiles.
	var omittableType types.Type
	var typ types.Type
	fieldDef := cfg.Schema.Types[field.Type.Name()]
	if cfg.Models.UserDefined(field.Type.Name()) {
		var err error
		typ, err = binder.FindTypeFromName(cfg.Models[field.Type.Name()].Model[0])
		if err != nil {
			return nil, err
		}
	} else {
		switch fieldDef.Kind {
		case ast.Scalar:
			// no user defined model, referencing a default scalar
			typ = types.NewNamed(
				types.NewTypeName(0, cfg.Model.Pkg(), "string", nil),
				nil,
				nil,
			)
		case ast.Interface, ast.Union:
			// no user defined model, referencing a generated interface type
			typ = types.NewNamed(
				types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
				types.NewInterfaceType([]*types.Func{}, []types.Type{}),
				nil,
			)
		case ast.Enum:
			// no user defined model, must reference a generated enum
			typ = types.NewNamed(
				types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
				nil,
				nil,
			)
		case ast.Object, ast.InputObject:
			// no user defined model, must reference a generated struct
			typ = types.NewNamed(
				types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
				types.NewStruct(nil, nil),
				nil,
			)
		default:
			panic(fmt.Errorf("unknown ast type %s", fieldDef.Kind))
		}
	}
	name := templates.ToGo(field.Name)
	if nameOverride := cfg.Models[schemaType.Name].Fields[field.Name].FieldName; nameOverride != "" {
		name = nameOverride
	}
	typ = binder.CopyModifiersFromAst(field.Type, typ)
	if cfg.StructFieldsAlwaysPointers {
		if isStruct(typ) && (fieldDef.Kind == ast.Object || fieldDef.Kind == ast.InputObject) {
			typ = types.NewPointer(typ)
		}
	}
	f := &Field{
		Name:        field.Name,
		GoName:      name,
		Type:        typ,
		Description: field.Description,
		Tag:         getStructTagFromField(cfg, field),
		Omittable:   cfg.NullableInputOmittable && schemaType.Kind == ast.InputObject && !field.Type.NonNull,
		IsResolver:  cfg.Models[schemaType.Name].Fields[field.Name].Resolver,
	}
	// An explicit per-field omittable setting overrides the global default.
	if omittable := cfg.Models[schemaType.Name].Fields[field.Name].Omittable; omittable != nil {
		f.Omittable = *omittable
	}
	if m.FieldHook != nil {
		mf, err := m.FieldHook(schemaType, field, f)
		if err != nil {
			return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err)
		}
		f = mf
	}
	if f.IsResolver && cfg.OmitResolverFields {
		return nil, nil
	}
	if f.Omittable {
		// Omittable is only valid on nullable input-object fields; anything
		// else is a configuration error.
		if schemaType.Kind != ast.InputObject || field.Type.NonNull {
			return nil, fmt.Errorf("generror: field %v.%v: omittable is only applicable to nullable input fields", schemaType.Name, field.Name)
		}
		var err error
		if omittableType == nil {
			omittableType, err = binder.FindTypeFromName("github.com/99designs/gqlgen/graphql.Omittable")
			if err != nil {
				return nil, err
			}
		}
		f.Type, err = binder.InstantiateType(omittableType, []types.Type{f.Type})
		if err != nil {
			return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err)
		}
	}
	return f, nil
}
func getExtraFields(cfg *config.Config, modelName string) []*Field {
modelcfg := cfg.Models[modelName]
@ -636,29 +653,9 @@ func removeDuplicateTags(t string) string {
return returnTags
}
// GoFieldHook applies the goField directive to the generated Field f,
// honoring its "name", "forceResolver", and "omittable" arguments.
// Arguments that fail to evaluate are silently ignored.
// TODO: This will be removed in the next breaking release
func GoFieldHook(td *ast.Definition, fd *ast.FieldDefinition, f *Field) (*Field, error) {
	for _, goField := range fd.Directives.ForNames("goField") {
		// override the generated Go field name
		if arg := goField.Arguments.ForName("name"); arg != nil {
			if k, err := arg.Value.Value(nil); err == nil {
				f.GoName = k.(string)
			}
		}
		// force the field to be generated as a resolver
		if arg := goField.Arguments.ForName("forceResolver"); arg != nil {
			if k, err := arg.Value.Value(nil); err == nil {
				f.IsResolver = k.(bool)
			}
		}
		// mark the field as omittable
		if arg := goField.Arguments.ForName("omittable"); arg != nil {
			if k, err := arg.Value.Value(nil); err == nil {
				f.Omittable = k.(bool)
			}
		}
	}
	return f, nil
}

View File

@ -22,11 +22,18 @@ type CodeGenerator interface {
}
// EarlySourceInjector is used to inject things that are required for user schema files to compile.
// Deprecated: Use EarlySourcesInjector instead
type EarlySourceInjector interface {
	// InjectSourceEarly returns a single source to add before the user schema is loaded.
	InjectSourceEarly() *ast.Source
}
// EarlySourcesInjector is used to inject things that are required for user schema files to compile.
type EarlySourcesInjector interface {
	// InjectSourcesEarly returns sources to add before the user schema is loaded;
	// unlike the deprecated single-source variant, it may also report an error.
	InjectSourcesEarly() ([]*ast.Source, error)
}
// LateSourceInjector is used to inject more sources, after we have loaded the users schema.
// Deprecated: Use LateSourcesInjector instead
type LateSourceInjector interface {
	// InjectSourceLate returns a single source to add, given the already-loaded schema.
	InjectSourceLate(schema *ast.Schema) *ast.Source
}
@ -35,3 +42,8 @@ type LateSourceInjector interface {
type ResolverImplementer interface {
	// Implement returns the resolver implementation text for field,
	// given the previous implementation's text (if any).
	Implement(prevImplementation string, field *codegen.Field) string
}
// LateSourcesInjector is used to inject more sources, after we have loaded the users schema.
type LateSourcesInjector interface {
	// InjectSourcesLate returns sources to add given the loaded schema;
	// unlike the deprecated single-source variant, it may also report an error.
	InjectSourcesLate(schema *ast.Schema) ([]*ast.Source, error)
}

View File

@ -53,26 +53,44 @@ func (m *Plugin) GenerateCode(data *codegen.Data) error {
func (m *Plugin) generateSingleFile(data *codegen.Data) error {
file := File{}
if _, err := os.Stat(data.Config.Resolver.Filename); err == nil {
// file already exists and we do not support updating resolvers with layout = single so just return
return nil
rewriter, err := rewrite.New(data.Config.Resolver.Dir())
if err != nil {
return err
}
for _, o := range data.Objects {
if o.HasResolvers() {
caser := cases.Title(language.English, cases.NoLower)
rewriter.MarkStructCopied(templates.LcFirst(o.Name) + templates.UcFirst(data.Config.Resolver.Type))
rewriter.GetMethodBody(data.Config.Resolver.Type, caser.String(o.Name))
file.Objects = append(file.Objects, o)
}
for _, f := range o.Fields {
if !f.IsResolver {
continue
}
resolver := Resolver{o, f, nil, "", `panic("not implemented")`, nil}
file.Resolvers = append(file.Resolvers, &resolver)
structName := templates.LcFirst(o.Name) + templates.UcFirst(data.Config.Resolver.Type)
comment := strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment(structName, f.GoFieldName), `\`))
implementation := strings.TrimSpace(rewriter.GetMethodBody(structName, f.GoFieldName))
if implementation != "" {
resolver := Resolver{o, f, rewriter.GetPrevDecl(structName, f.GoFieldName), comment, implementation, nil}
file.Resolvers = append(file.Resolvers, &resolver)
} else {
resolver := Resolver{o, f, nil, "", `panic("not implemented")`, nil}
file.Resolvers = append(file.Resolvers, &resolver)
}
}
}
if _, err := os.Stat(data.Config.Resolver.Filename); err == nil {
file.name = data.Config.Resolver.Filename
file.imports = rewriter.ExistingImports(file.name)
file.RemainingSource = rewriter.RemainingSource(file.name)
}
resolverBuild := &ResolverBuild{
File: &file,
PackageName: data.Config.Resolver.Package,
@ -88,7 +106,7 @@ func (m *Plugin) generateSingleFile(data *codegen.Data) error {
return templates.Render(templates.Options{
PackageName: data.Config.Resolver.Package,
FileNotice: `// THIS CODE IS A STARTING POINT ONLY. IT WILL NOT BE UPDATED WITH SCHEMA CHANGES.`,
FileNotice: `// THIS CODE WILL BE UPDATED WITH SCHEMA CHANGES. PREVIOUS IMPLEMENTATION FOR SCHEMA CHANGES WILL BE KEPT IN THE COMMENT SECTION. IMPLEMENTATION FOR UNCHANGED SCHEMA WILL BE KEPT.`,
Filename: data.Config.Resolver.Filename,
Data: resolverBuild,
Packages: data.Config.Packages,

View File

@ -48,5 +48,7 @@
// - When renaming or deleting a resolver the old code will be put in here. You can safely delete
// it when you're done.
// - You have helper methods in this file. Move them out to keep these resolver files clean.
/*
{{ .RemainingSource }}
*/
{{ end }}

View File

@ -1,5 +1,33 @@
# Changelog
## 3.3.0 (2024-08-27)
### Added
- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
- #213: nil version equality checking (thanks @KnutZuidema)
### Changed
- #241: Simplify StrictNewVersion parsing (thanks @grosser)
- Testing support up through Go 1.23
- Minimum version set to 1.21 as this is what's tested now
- Fuzz testing now supports caching
## 3.2.1 (2023-04-10)
### Changed
- #198: Improved testing around pre-release names
- #200: Improved code scanning with addition of CodeQL
- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
- #203: Docs updated for security details
### Fixed
- #199: Fixed issue with range transformations
## 3.2.0 (2022-11-28)
### Added

View File

@ -19,6 +19,7 @@ test-cover:
.PHONY: fuzz
fuzz:
@echo "==> Running Fuzz Tests"
go env GOCACHE
go test -fuzz=FuzzNewVersion -fuzztime=15s .
go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
go test -fuzz=FuzzNewConstraint -fuzztime=15s .
@ -27,4 +28,4 @@ $(GOLANGCI_LINT):
# Install golangci-lint. The configuration for it is in the .golangci.yml
# file in the root of the repository
echo ${GOPATH}
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2

View File

@ -13,12 +13,9 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds
[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
If you are looking for a command line tool for version comparisons please see
[vert](https://github.com/Masterminds/vert) which uses this library.
## Package Versions
Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version.
Note, import `github.com/Masterminds/semver/v3` to use the latest version.
There are three major versions of the `semver` package.
@ -80,12 +77,12 @@ There are two methods for comparing versions. One uses comparison methods on
differences to notes between these two methods of comparison.
1. When two versions are compared using functions such as `Compare`, `LessThan`,
and others it will follow the specification and always include prereleases
and others it will follow the specification and always include pre-releases
within the comparison. It will provide an answer that is valid with the
comparison section of the spec at https://semver.org/#spec-item-11
2. When constraint checking is used for checks or validation it will follow a
different set of rules that are common for ranges with tools like npm/js
and Rust/Cargo. This includes considering prereleases to be invalid if the
and Rust/Cargo. This includes considering pre-releases to be invalid if the
ranges does not include one. If you want to have it include pre-releases a
simple solution is to include `-0` in your range.
3. Constraint ranges can have some complex rules including the shorthand use of
@ -113,7 +110,7 @@ v, err := semver.NewVersion("1.3")
if err != nil {
// Handle version not being parsable.
}
// Check if the version meets the constraints. The a variable will be true.
// Check if the version meets the constraints. The variable a will be true.
a := c.Check(v)
```
@ -137,20 +134,20 @@ The basic comparisons are:
### Working With Prerelease Versions
Pre-releases, for those not familiar with them, are used for software releases
prior to stable or generally available releases. Examples of prereleases include
development, alpha, beta, and release candidate releases. A prerelease may be
prior to stable or generally available releases. Examples of pre-releases include
development, alpha, beta, and release candidate releases. A pre-release may be
a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
order of precedence, prereleases come before their associated releases. In this
order of precedence, pre-releases come before their associated releases. In this
example `1.2.3-beta.1 < 1.2.3`.
According to the Semantic Version specification prereleases may not be
According to the Semantic Version specification, pre-releases may not be
API compliant with their release counterpart. It says,
> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
SemVer comparisons using constraints without a prerelease comparator will skip
prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
SemVer's comparisons using constraints without a pre-release comparator will skip
pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
The reason for the `0` as a pre-release version in the example comparison is
because pre-releases can only contain ASCII alphanumerics and hyphens (along with
@ -171,6 +168,9 @@ These look like:
* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
### Wildcards In Comparisons
The `x`, `X`, and `*` characters can be used as a wildcard character. This works

View File

@ -83,22 +83,23 @@ func StrictNewVersion(v string) (*Version, error) {
original: v,
}
// check for prerelease or build metadata
var extra []string
if strings.ContainsAny(parts[2], "-+") {
// Start with the build metadata first as it needs to be on the right
extra = strings.SplitN(parts[2], "+", 2)
if len(extra) > 1 {
// build metadata found
sv.metadata = extra[1]
parts[2] = extra[0]
// Extract build metadata
if strings.Contains(parts[2], "+") {
extra := strings.SplitN(parts[2], "+", 2)
sv.metadata = extra[1]
parts[2] = extra[0]
if err := validateMetadata(sv.metadata); err != nil {
return nil, err
}
}
extra = strings.SplitN(parts[2], "-", 2)
if len(extra) > 1 {
// prerelease found
sv.pre = extra[1]
parts[2] = extra[0]
// Extract build prerelease
if strings.Contains(parts[2], "-") {
extra := strings.SplitN(parts[2], "-", 2)
sv.pre = extra[1]
parts[2] = extra[0]
if err := validatePrerelease(sv.pre); err != nil {
return nil, err
}
}
@ -114,7 +115,7 @@ func StrictNewVersion(v string) (*Version, error) {
}
}
// Extract the major, minor, and patch elements onto the returned Version
// Extract major, minor, and patch
var err error
sv.major, err = strconv.ParseUint(parts[0], 10, 64)
if err != nil {
@ -131,23 +132,6 @@ func StrictNewVersion(v string) (*Version, error) {
return nil, err
}
// No prerelease or build metadata found so returning now as a fastpath.
if sv.pre == "" && sv.metadata == "" {
return sv, nil
}
if sv.pre != "" {
if err = validatePrerelease(sv.pre); err != nil {
return nil, err
}
}
if sv.metadata != "" {
if err = validateMetadata(sv.metadata); err != nil {
return nil, err
}
}
return sv, nil
}
@ -381,15 +365,31 @@ func (v *Version) LessThan(o *Version) bool {
return v.Compare(o) < 0
}
// LessThanEqual reports whether v's precedence is lower than or equal to o's.
func (v *Version) LessThanEqual(o *Version) bool {
	return !(v.Compare(o) > 0)
}
// GreaterThan reports whether v's precedence is strictly higher than o's.
func (v *Version) GreaterThan(o *Version) bool {
	cmp := v.Compare(o)
	return cmp > 0
}
// GreaterThanEqual reports whether v's precedence is higher than or equal to o's.
func (v *Version) GreaterThanEqual(o *Version) bool {
	return !(v.Compare(o) < 0)
}
// Equal reports whether two versions have equal precedence.
// Build metadata is ignored, so versions that differ only in metadata
// compare equal. Two nil versions are equal; nil vs non-nil is not.
func (v *Version) Equal(o *Version) bool {
	switch {
	case v == o:
		return true
	case v == nil, o == nil:
		return false
	default:
		return v.Compare(o) == 0
	}
}

View File

@ -47,6 +47,14 @@ gron.IsDue(expr) // true|false, nil
gron.IsDue(expr, time.Date(2021, time.April, 1, 1, 1, 0, 0, time.UTC)) // true|false, nil
```
> Validity can be checked without instantiation:
```go
import "github.com/adhocore/gronx"
gronx.IsValid("* * * * *") // true
```
### Batch Due Check
If you have multiple cron expressions to check due on same reference time use `BatchDue()`:

View File

@ -75,7 +75,9 @@ func New() *Gronx {
// IsDue checks if cron expression is due for given reference time (or now).
// It returns bool or error if any.
func (g *Gronx) IsDue(expr string, ref ...time.Time) (bool, error) {
ref = append(ref, time.Now())
if len(ref) == 0 {
ref = append(ref, time.Now())
}
g.C.SetRef(ref[0])
segs, err := Segments(expr)
@ -157,12 +159,16 @@ func (g *Gronx) SegmentsDue(segs []string) (bool, error) {
return true, nil
}
// IsValid reports whether the cron expression is valid.
// It simply delegates to the package-level IsValid function.
func (g *Gronx) IsValid(expr string) bool {
	return IsValid(expr)
}
// checker is the package-level SegmentChecker used for validity checks.
// NOTE(review): its ref is captured once at package init (time.Now() runs
// only once) — confirm validity checking never depends on the current time.
var checker = &SegmentChecker{ref: time.Now()}
// IsValid checks if cron expression is valid.
// It returns bool.
func (g *Gronx) IsValid(expr string) bool {
func IsValid(expr string) bool {
segs, err := Segments(expr)
if err != nil {
return false

View File

@ -14,7 +14,7 @@ func inStep(val int, s string, bounds []int) (bool, error) {
if err != nil {
return false, err
}
if step == 0 {
if step <= 0 {
return false, errors.New("step can't be 0")
}

View File

@ -1,23 +0,0 @@
language: go
# See https://travis-ci.community/t/goos-js-goarch-wasm-go-run-fails-panic-newosproc-not-implemented/1651
#addons:
# chrome: stable
before_install:
- export GO111MODULE=on
#install:
#- go get github.com/agnivade/wasmbrowsertest
#- mv $GOPATH/bin/wasmbrowsertest $GOPATH/bin/go_js_wasm_exec
#- export PATH=$GOPATH/bin:$PATH
go:
- 1.13.x
- 1.14.x
- 1.15.x
- tip
script:
#- GOOS=js GOARCH=wasm go test -v
- go test -v

View File

@ -4,12 +4,10 @@ install:
go install
lint:
gofmt -l -s -w . && go vet . && golint -set_exit_status=1 .
gofmt -l -s -w . && go vet .
test: # The first 2 go gets are to support older Go versions
go get github.com/arbovm/levenshtein
go get github.com/dgryski/trifles/leven
GO111MODULE=on go test -race -v -coverprofile=coverage.txt -covermode=atomic
test:
go test -race -v -coverprofile=coverage.txt -covermode=atomic
bench:
go test -run=XXX -bench=. -benchmem -count=5

View File

@ -1,4 +1,4 @@
levenshtein [![Build Status](https://travis-ci.org/agnivade/levenshtein.svg?branch=master)](https://travis-ci.org/agnivade/levenshtein) [![Go Report Card](https://goreportcard.com/badge/github.com/agnivade/levenshtein)](https://goreportcard.com/report/github.com/agnivade/levenshtein) [![PkgGoDev](https://pkg.go.dev/badge/github.com/agnivade/levenshtein)](https://pkg.go.dev/github.com/agnivade/levenshtein)
levenshtein ![Build Status](https://github.com/agnivade/levenshtein/actions/workflows/ci.yml/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/agnivade/levenshtein)](https://goreportcard.com/report/github.com/agnivade/levenshtein) [![PkgGoDev](https://pkg.go.dev/badge/github.com/agnivade/levenshtein)](https://pkg.go.dev/github.com/agnivade/levenshtein)
===========
[Go](http://golang.org) package to calculate the [Levenshtein Distance](http://en.wikipedia.org/wiki/Levenshtein_distance)

View File

@ -41,6 +41,25 @@ func ComputeDistance(a, b string) int {
if len(s1) > len(s2) {
s1, s2 = s2, s1
}
// remove trailing identical runes.
for i := 0; i < len(s1); i++ {
if s1[len(s1)-1-i] != s2[len(s2)-1-i] {
s1 = s1[:len(s1)-i]
s2 = s2[:len(s2)-i]
break
}
}
// Remove leading identical runes.
for i := 0; i < len(s1); i++ {
if s1[i] != s2[i] {
s1 = s1[i:]
s2 = s2[i:]
break
}
}
lenS1 := len(s1)
lenS2 := len(s2)
@ -71,7 +90,7 @@ func ComputeDistance(a, b string) int {
for j := 1; j <= lenS1; j++ {
current := x[j-1] // match
if s2[i-1] != s1[j-1] {
current = min(min(x[j-1]+1, prev+1), x[j]+1)
current = min(x[j-1]+1, prev+1, x[j]+1)
}
x[j-1] = prev
prev = current
@ -80,10 +99,3 @@ func ComputeDistance(a, b string) int {
}
return int(x[lenS1])
}
// min returns the smaller of the two uint16 values.
func min(a, b uint16) uint16 {
	if b < a {
		return b
	}
	return a
}

View File

@ -33,6 +33,7 @@ import (
"sync"
"github.com/mholt/acmez/v2/acme"
"go.uber.org/zap"
)
// getAccount either loads or creates a new account, depending on if
@ -40,8 +41,15 @@ import (
// getAccount loads the ACME account for the given CA and email from
// storage; if no account is stored yet (fs.ErrNotExist), it starts a
// fresh, not-yet-registered account for the email instead.
func (am *ACMEIssuer) getAccount(ctx context.Context, ca, email string) (acme.Account, error) {
	acct, err := am.loadAccount(ctx, ca, email)
	if errors.Is(err, fs.ErrNotExist) {
		// nothing in storage for this CA+email: create a new account
		am.Logger.Info("creating new account because no account for configured email is known to us",
			zap.String("email", email),
			zap.String("ca", ca),
			zap.Error(err))
		return am.newAccount(email)
	}
	// NOTE(review): this Debug line also fires when loadAccount returned a
	// non-ErrNotExist error; the error is still propagated to the caller.
	am.Logger.Debug("using existing ACME account because key found in storage associated with email",
		zap.String("email", email),
		zap.String("ca", ca))
	return acct, err
}
@ -407,6 +415,15 @@ func (am *ACMEIssuer) mostRecentAccountEmail(ctx context.Context, caURL string)
return getPrimaryContact(account), true
}
// accountRegLockKey derives the storage lock key used to synchronize
// registration of the given ACME account; the primary contact (when one
// exists) is appended so distinct accounts use distinct locks.
func accountRegLockKey(acc acme.Account) string {
	const base = "register_acme_account"
	if len(acc.Contact) == 0 {
		return base
	}
	return base + "_" + getPrimaryContact(acc)
}
// getPrimaryContact returns the first contact on the account (if any)
// without the scheme. (I guess we assume an email address.)
func getPrimaryContact(account acme.Account) string {

View File

@ -50,77 +50,123 @@ func (iss *ACMEIssuer) newACMEClientWithAccount(ctx context.Context, useTestCA,
return nil, err
}
// look up or create the ACME account
var account acme.Account
if iss.AccountKeyPEM != "" {
account, err = iss.GetAccount(ctx, []byte(iss.AccountKeyPEM))
} else {
account, err = iss.getAccount(ctx, client.Directory, iss.getEmail())
// we try loading the account from storage before a potential
// lock, and after obtaining the lock as well, to ensure we don't
// repeat work done by another instance or goroutine
getAccount := func() (acme.Account, error) {
// look up or create the ACME account
var account acme.Account
if iss.AccountKeyPEM != "" {
iss.Logger.Info("using configured ACME account")
account, err = iss.GetAccount(ctx, []byte(iss.AccountKeyPEM))
} else {
account, err = iss.getAccount(ctx, client.Directory, iss.getEmail())
}
if err != nil {
return acme.Account{}, fmt.Errorf("getting ACME account: %v", err)
}
return account, nil
}
// first try getting the account
account, err := getAccount()
if err != nil {
return nil, fmt.Errorf("getting ACME account: %v", err)
return nil, err
}
// register account if it is new
if account.Status == "" {
if iss.NewAccountFunc != nil {
// obtain lock here, since NewAccountFunc calls happen concurrently and they typically read and change the issuer
iss.mu.Lock()
account, err = iss.NewAccountFunc(ctx, iss, account)
iss.mu.Unlock()
if err != nil {
return nil, fmt.Errorf("account pre-registration callback: %v", err)
iss.Logger.Info("ACME account has empty status; registering account with ACME server",
zap.Strings("contact", account.Contact),
zap.String("location", account.Location))
// synchronize this so the account is only created once
acctLockKey := accountRegLockKey(account)
err = acquireLock(ctx, iss.config.Storage, acctLockKey)
if err != nil {
return nil, fmt.Errorf("locking account registration: %v", err)
}
defer func() {
if err := releaseLock(ctx, iss.config.Storage, acctLockKey); err != nil {
iss.Logger.Error("failed to unlock account registration lock", zap.Error(err))
}
}()
// if we're not the only one waiting for this account, then by this point it should already be registered and in storage; reload it
account, err = getAccount()
if err != nil {
return nil, err
}
// agree to terms
if interactive {
if !iss.isAgreed() {
var termsURL string
dir, err := client.GetDirectory(ctx)
// if we are the only or first one waiting for this account, then proceed to register it while we have the lock
if account.Status == "" {
if iss.NewAccountFunc != nil {
// obtain lock here, since NewAccountFunc calls happen concurrently and they typically read and change the issuer
iss.mu.Lock()
account, err = iss.NewAccountFunc(ctx, iss, account)
iss.mu.Unlock()
if err != nil {
return nil, fmt.Errorf("getting directory: %w", err)
return nil, fmt.Errorf("account pre-registration callback: %v", err)
}
if dir.Meta != nil {
termsURL = dir.Meta.TermsOfService
}
if termsURL != "" {
agreed := iss.askUserAgreement(termsURL)
if !agreed {
return nil, fmt.Errorf("user must agree to CA terms")
}
// agree to terms
if interactive {
if !iss.isAgreed() {
var termsURL string
dir, err := client.GetDirectory(ctx)
if err != nil {
return nil, fmt.Errorf("getting directory: %w", err)
}
if dir.Meta != nil {
termsURL = dir.Meta.TermsOfService
}
if termsURL != "" {
agreed := iss.askUserAgreement(termsURL)
if !agreed {
return nil, fmt.Errorf("user must agree to CA terms")
}
iss.mu.Lock()
iss.agreed = agreed
iss.mu.Unlock()
}
iss.mu.Lock()
iss.agreed = agreed
iss.mu.Unlock()
}
} else {
// can't prompt a user who isn't there; they should
// have reviewed the terms beforehand
iss.mu.Lock()
iss.agreed = true
iss.mu.Unlock()
}
account.TermsOfServiceAgreed = iss.isAgreed()
// associate account with external binding, if configured
if iss.ExternalAccount != nil {
err := account.SetExternalAccountBinding(ctx, client.Client, *iss.ExternalAccount)
if err != nil {
return nil, err
}
}
// create account
account, err = client.NewAccount(ctx, account)
if err != nil {
return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err)
}
iss.Logger.Info("new ACME account registered",
zap.Strings("contact", account.Contact),
zap.String("status", account.Status))
// persist the account to storage
err = iss.saveAccount(ctx, client.Directory, account)
if err != nil {
return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err)
}
} else {
// can't prompt a user who isn't there; they should
// have reviewed the terms beforehand
iss.mu.Lock()
iss.agreed = true
iss.mu.Unlock()
}
account.TermsOfServiceAgreed = iss.isAgreed()
// associate account with external binding, if configured
if iss.ExternalAccount != nil {
err := account.SetExternalAccountBinding(ctx, client.Client, *iss.ExternalAccount)
if err != nil {
return nil, err
}
}
// create account
account, err = client.NewAccount(ctx, account)
if err != nil {
return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err)
}
// persist the account to storage
err = iss.saveAccount(ctx, client.Directory, account)
if err != nil {
return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err)
iss.Logger.Info("account has already been registered; reloaded",
zap.Strings("contact", account.Contact),
zap.String("status", account.Status),
zap.String("location", account.Location))
}
}

View File

@ -461,7 +461,7 @@ func (am *ACMEIssuer) doIssue(ctx context.Context, csr *x509.CertificateRequest,
// between client and server or some sort of bookkeeping error with regards to the certID
// and the server is rejecting the ARI certID. In any case, an invalid certID may cause
// orders to fail. So try once without setting it.
if !usingTestCA && attempts != 2 {
if !am.config.DisableARI && !usingTestCA && attempts != 2 {
if replacing, ok := ctx.Value(ctxKeyARIReplaces).(*x509.Certificate); ok {
params.Replaces = replacing
}

View File

@ -103,53 +103,54 @@ func (cfg *Config) certNeedsRenewal(leaf *x509.Certificate, ari acme.RenewalInfo
logger = zap.NewNop()
}
// first check ARI: if it says it's time to renew, it's time to renew
// (notice that we don't strictly require an ARI window to also exist; we presume
// that if a time has been selected, a window does or did exist, even if it didn't
// get stored/encoded for some reason - but also: this allows administrators to
// manually or explicitly schedule a renewal time indepedently of ARI which could
// be useful)
selectedTime := ari.SelectedTime
if !cfg.DisableARI {
// first check ARI: if it says it's time to renew, it's time to renew
// (notice that we don't strictly require an ARI window to also exist; we presume
// that if a time has been selected, a window does or did exist, even if it didn't
// get stored/encoded for some reason - but also: this allows administrators to
// manually or explicitly schedule a renewal time indepedently of ARI which could
// be useful)
selectedTime := ari.SelectedTime
// if, for some reason a random time in the window hasn't been selected yet, but an ARI
// window does exist, we can always improvise one... even if this is called repeatedly,
// a random time is a random time, whether you generate it once or more :D
// (code borrowed from our acme package)
if selectedTime.IsZero() &&
(!ari.SuggestedWindow.Start.IsZero() && !ari.SuggestedWindow.End.IsZero()) {
start, end := ari.SuggestedWindow.Start.Unix()+1, ari.SuggestedWindow.End.Unix()
selectedTime = time.Unix(rand.Int63n(end-start)+start, 0).UTC()
logger.Warn("no renewal time had been selected with ARI; chose an ephemeral one for now",
zap.Time("ephemeral_selected_time", selectedTime))
}
// if a renewal time has been selected, start with that
if !selectedTime.IsZero() {
// ARI spec recommends an algorithm that renews after the randomly-selected
// time OR just before it if the next waking time would be after it; this
// cutoff can actually be before the start of the renewal window, but the spec
// author says that's OK: https://github.com/aarongable/draft-acme-ari/issues/71
cutoff := ari.SelectedTime.Add(-cfg.certCache.options.RenewCheckInterval)
if time.Now().After(cutoff) {
logger.Info("certificate needs renewal based on ARI window",
zap.Time("selected_time", selectedTime),
zap.Time("renewal_cutoff", cutoff))
return true
// if, for some reason a random time in the window hasn't been selected yet, but an ARI
// window does exist, we can always improvise one... even if this is called repeatedly,
// a random time is a random time, whether you generate it once or more :D
// (code borrowed from our acme package)
if selectedTime.IsZero() &&
(!ari.SuggestedWindow.Start.IsZero() && !ari.SuggestedWindow.End.IsZero()) {
start, end := ari.SuggestedWindow.Start.Unix()+1, ari.SuggestedWindow.End.Unix()
selectedTime = time.Unix(rand.Int63n(end-start)+start, 0).UTC()
logger.Warn("no renewal time had been selected with ARI; chose an ephemeral one for now",
zap.Time("ephemeral_selected_time", selectedTime))
}
// according to ARI, we are not ready to renew; however, we do not rely solely on
// ARI calculations... what if there is a bug in our implementation, or in the
// server's, or the stored metadata? for redundancy, give credence to the expiration
// date; ignore ARI if we are past a "dangerously close" limit, to avoid any
// possibility of a bug in ARI compromising a site's uptime: we should always always
// always give heed to actual validity period
if currentlyInRenewalWindow(leaf.NotBefore, expiration, 1.0/20.0) {
logger.Warn("certificate is in emergency renewal window; superceding ARI",
zap.Duration("remaining", time.Until(expiration)),
zap.Time("renewal_cutoff", cutoff))
return true
}
// if a renewal time has been selected, start with that
if !selectedTime.IsZero() {
// ARI spec recommends an algorithm that renews after the randomly-selected
// time OR just before it if the next waking time would be after it; this
// cutoff can actually be before the start of the renewal window, but the spec
// author says that's OK: https://github.com/aarongable/draft-acme-ari/issues/71
cutoff := ari.SelectedTime.Add(-cfg.certCache.options.RenewCheckInterval)
if time.Now().After(cutoff) {
logger.Info("certificate needs renewal based on ARI window",
zap.Time("selected_time", selectedTime),
zap.Time("renewal_cutoff", cutoff))
return true
}
// according to ARI, we are not ready to renew; however, we do not rely solely on
// ARI calculations... what if there is a bug in our implementation, or in the
// server's, or the stored metadata? for redundancy, give credence to the expiration
// date; ignore ARI if we are past a "dangerously close" limit, to avoid any
// possibility of a bug in ARI compromising a site's uptime: we should always always
// always give heed to actual validity period
if currentlyInRenewalWindow(leaf.NotBefore, expiration, 1.0/20.0) {
logger.Warn("certificate is in emergency renewal window; superceding ARI",
zap.Duration("remaining", time.Until(expiration)),
zap.Time("renewal_cutoff", cutoff))
return true
}
}
}
// the normal check, in the absence of ARI, is to determine if we're near enough (or past)
@ -552,6 +553,7 @@ func SubjectIsInternal(subj string) bool {
return subj == "localhost" ||
strings.HasSuffix(subj, ".localhost") ||
strings.HasSuffix(subj, ".local") ||
strings.HasSuffix(subj, ".internal") ||
strings.HasSuffix(subj, ".home.arpa") ||
isInternalIP(subj)
}

View File

@ -149,6 +149,10 @@ type Config struct {
// EXPERIMENTAL: Subject to change or removal.
SubjectTransformer func(ctx context.Context, domain string) string
// Disables both ARI fetching and the use of ARI for renewal decisions.
// TEMPORARY: Will likely be removed in the future.
DisableARI bool
// Set a logger to enable logging. If not set,
// a default logger will be created.
Logger *zap.Logger
@ -370,9 +374,11 @@ func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bo
}
for _, domainName := range domainNames {
domainName = normalizedName(domainName)
// if on-demand is configured, defer obtain and renew operations
if cfg.OnDemand != nil {
cfg.OnDemand.hostAllowlist[normalizedName(domainName)] = struct{}{}
cfg.OnDemand.hostAllowlist[domainName] = struct{}{}
continue
}
@ -449,7 +455,7 @@ func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool)
// ensure ARI is updated before we check whether the cert needs renewing
// (we ignore the second return value because we already check if needs renewing anyway)
if cert.ari.NeedsRefresh() {
if !cfg.DisableARI && cert.ari.NeedsRefresh() {
cert, _, err = cfg.updateARI(ctx, cert, cfg.Logger)
if err != nil {
cfg.Logger.Error("updating ARI upon managing", zap.Error(err))
@ -886,11 +892,13 @@ func (cfg *Config) renewCert(ctx context.Context, name string, force, interactiv
// if we're renewing with the same ACME CA as before, have the ACME
// client tell the server we are replacing a certificate (but doing
// this on the wrong CA, or when the CA doesn't recognize the certID,
// can fail the order)
if acmeData, err := certRes.getACMEData(); err == nil && acmeData.CA != "" {
if acmeIss, ok := issuer.(*ACMEIssuer); ok {
if acmeIss.CA == acmeData.CA {
ctx = context.WithValue(ctx, ctxKeyARIReplaces, leaf)
// can fail the order) -- TODO: change this check to whether we're using the same ACME account, not CA
if !cfg.DisableARI {
if acmeData, err := certRes.getACMEData(); err == nil && acmeData.CA != "" {
if acmeIss, ok := issuer.(*ACMEIssuer); ok {
if acmeIss.CA == acmeData.CA {
ctx = context.WithValue(ctx, ctxKeyARIReplaces, leaf)
}
}
}
}
@ -982,23 +990,26 @@ func (cfg *Config) generateCSR(privateKey crypto.PrivateKey, sans []string, useC
csrTemplate := new(x509.CertificateRequest)
for _, name := range sans {
// identifiers should be converted to punycode before going into the CSR
// (convert IDNs to ASCII according to RFC 5280 section 7)
normalizedName, err := idna.ToASCII(name)
if err != nil {
return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err)
}
// TODO: This is a temporary hack to support ZeroSSL API...
if useCN && csrTemplate.Subject.CommonName == "" && len(name) <= 64 {
csrTemplate.Subject.CommonName = name
if useCN && csrTemplate.Subject.CommonName == "" && len(normalizedName) <= 64 {
csrTemplate.Subject.CommonName = normalizedName
continue
}
if ip := net.ParseIP(name); ip != nil {
if ip := net.ParseIP(normalizedName); ip != nil {
csrTemplate.IPAddresses = append(csrTemplate.IPAddresses, ip)
} else if strings.Contains(name, "@") {
csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, name)
} else if u, err := url.Parse(name); err == nil && strings.Contains(name, "/") {
} else if strings.Contains(normalizedName, "@") {
csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, normalizedName)
} else if u, err := url.Parse(normalizedName); err == nil && strings.Contains(normalizedName, "/") {
csrTemplate.URIs = append(csrTemplate.URIs, u)
} else {
// convert IDNs to ASCII according to RFC 5280 section 7
normalizedName, err := idna.ToASCII(name)
if err != nil {
return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err)
}
csrTemplate.DNSNames = append(csrTemplate.DNSNames, normalizedName)
}
}
@ -1007,6 +1018,16 @@ func (cfg *Config) generateCSR(privateKey crypto.PrivateKey, sans []string, useC
csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, mustStapleExtension)
}
// IP addresses aren't printed here because I'm too lazy to marshal them as strings, but
// we at least print the incoming SANs so it should be obvious what became IPs
cfg.Logger.Debug("created CSR",
zap.Strings("identifiers", sans),
zap.Strings("san_dns_names", csrTemplate.DNSNames),
zap.Strings("san_emails", csrTemplate.EmailAddresses),
zap.String("common_name", csrTemplate.Subject.CommonName),
zap.Int("extra_extensions", len(csrTemplate.ExtraExtensions)),
)
csrDER, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, privateKey)
if err != nil {
return nil, err
@ -1244,8 +1265,10 @@ func (cfg *Config) managedCertNeedsRenewal(certRes CertificateResource, emitLogs
return 0, nil, true
}
var ari acme.RenewalInfo
if ariPtr, err := certRes.getARI(); err == nil && ariPtr != nil {
ari = *ariPtr
if !cfg.DisableARI {
if ariPtr, err := certRes.getARI(); err == nil && ariPtr != nil {
ari = *ariPtr
}
}
remaining := time.Until(expiresAt(certChain[0]))
return remaining, certChain[0], cfg.certNeedsRenewal(certChain[0], ari, emitLogs)

View File

@ -27,6 +27,8 @@ import (
"path/filepath"
"runtime"
"time"
"github.com/caddyserver/certmagic/internal/atomicfile"
)
// FileStorage facilitates forming file paths derived from a root
@ -82,12 +84,30 @@ func (s *FileStorage) Store(_ context.Context, key string, value []byte) error {
if err != nil {
return err
}
return os.WriteFile(filename, value, 0600)
fp, err := atomicfile.New(filename, 0o600)
if err != nil {
return err
}
_, err = fp.Write(value)
if err != nil {
// cancel the write
fp.Cancel()
return err
}
// close, thereby flushing the write
return fp.Close()
}
// Load retrieves the value at key.
func (s *FileStorage) Load(_ context.Context, key string) ([]byte, error) {
	// Deliberately drop any partially-read bytes on error: os.ReadFile can
	// return both data and an error (e.g. on a short read), and returning
	// bytes alongside a non-nil error would invite callers to erroneously
	// treat the partial contents as a valid value.
	contents, err := os.ReadFile(s.Filename(key))
	if err != nil {
		return nil, err
	}
	return contents, nil
}
// Delete deletes the value at key.

View File

@ -582,7 +582,7 @@ func (cfg *Config) handshakeMaintenance(ctx context.Context, hello *tls.ClientHe
}
// Check ARI status
if cert.ari.NeedsRefresh() {
if !cfg.DisableARI && cert.ari.NeedsRefresh() {
// we ignore the second return value here because we go on to check renewal status below regardless
var err error
cert, _, err = cfg.updateARI(ctx, cert, logger)

View File

@ -0,0 +1,11 @@
# atomic file
This package is copied from
https://github.com/containerd/containerd/blob/main/pkg%2Fatomicfile%2Ffile.go

See https://github.com/caddyserver/certmagic/issues/296 for background.

View File

@ -0,0 +1,148 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package atomicfile provides a mechanism (on Unix-like platforms) to present a consistent view of a file to separate
processes even while the file is being written. This is accomplished by writing a temporary file, syncing to disk, and
renaming over the destination file name.
Partial/inconsistent reads can occur due to:
1. A process attempting to read the file while it is being written to (both in the case of a new file with a
short/incomplete write or in the case of an existing, updated file where new bytes may be written at the beginning
but old bytes may still be present after).
2. Concurrent goroutines leading to multiple active writers of the same file.
The above mechanism explicitly protects against (1) as all writes are to a file with a temporary name.
There is no explicit protection against multiple, concurrent goroutines attempting to write the same file. However,
atomically writing the file should mean only one writer will "win" and a consistent file will be visible.
Note: atomicfile is partially implemented for Windows. The Windows codepath performs the same operations, however
Windows does not guarantee that a rename operation is atomic; a crash in the middle may leave the destination file
truncated rather than with the expected content.
*/
package atomicfile
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sync"
)
// File is an io.ReadWriteCloser that can also be Canceled if a change needs to be abandoned.
type File interface {
io.ReadWriteCloser
// Cancel abandons a change to a file. This can be called if a write fails or another error occurs.
Cancel() error
}
// ErrClosed is returned if Read or Write are called on a closed File.
var ErrClosed = errors.New("file is closed")
// New returns a new atomic file. On Unix-like platforms, the writer (an io.ReadWriteCloser) is backed by a temporary
// file placed into the same directory as the destination file (using filepath.Dir to split the directory from the
// name). On a call to Close the temporary file is synced to disk and renamed to its final name, hiding any previous
// file by the same name.
//
// Note: Take care to call Close and handle any errors that are returned. Errors returned from Close may indicate that
// the file was not written with its final name.
func New(name string, mode os.FileMode) (File, error) {
return newFile(name, mode)
}
type atomicFile struct {
name string
f *os.File
closed bool
closedMu sync.RWMutex
}
func newFile(name string, mode os.FileMode) (File, error) {
dir := filepath.Dir(name)
f, err := os.CreateTemp(dir, "")
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %w", err)
}
if err := f.Chmod(mode); err != nil {
return nil, fmt.Errorf("failed to change temp file permissions: %w", err)
}
return &atomicFile{name: name, f: f}, nil
}
func (a *atomicFile) Close() (err error) {
a.closedMu.Lock()
defer a.closedMu.Unlock()
if a.closed {
return nil
}
a.closed = true
defer func() {
if err != nil {
_ = os.Remove(a.f.Name()) // ignore errors
}
}()
// The order of operations here is:
// 1. sync
// 2. close
// 3. rename
// While the ordering of 2 and 3 is not important on Unix-like operating systems, Windows cannot rename an open
// file. By closing first, we allow the rename operation to succeed.
if err = a.f.Sync(); err != nil {
return fmt.Errorf("failed to sync temp file %q: %w", a.f.Name(), err)
}
if err = a.f.Close(); err != nil {
return fmt.Errorf("failed to close temp file %q: %w", a.f.Name(), err)
}
if err = os.Rename(a.f.Name(), a.name); err != nil {
return fmt.Errorf("failed to rename %q to %q: %w", a.f.Name(), a.name, err)
}
return nil
}
func (a *atomicFile) Cancel() error {
a.closedMu.Lock()
defer a.closedMu.Unlock()
if a.closed {
return nil
}
a.closed = true
_ = a.f.Close() // ignore error
return os.Remove(a.f.Name())
}
func (a *atomicFile) Read(p []byte) (n int, err error) {
a.closedMu.RLock()
defer a.closedMu.RUnlock()
if a.closed {
return 0, ErrClosed
}
return a.f.Read(p)
}
func (a *atomicFile) Write(p []byte) (n int, err error) {
a.closedMu.RLock()
defer a.closedMu.RUnlock()
if a.closed {
return 0, ErrClosed
}
return a.f.Write(p)
}

View File

@ -136,7 +136,7 @@ func (certCache *Cache) RenewManagedCertificates(ctx context.Context) error {
}
// ACME-specific: see if if ACME Renewal Info (ARI) window needs refreshing
if cert.ari.NeedsRefresh() {
if !cfg.DisableARI && cert.ari.NeedsRefresh() {
configs[cert.hash] = cfg
ariQueue = append(ariQueue, cert)
}
@ -427,7 +427,7 @@ func (cfg *Config) storageHasNewerARI(ctx context.Context, cert Certificate) (bo
// or if the one in storage has a later RetryAfter (though I suppose
// it's not guaranteed, typically those will move forward in time)
if (!cert.ari.HasWindow() && storedCertData.RenewalInfo.HasWindow()) ||
storedCertData.RenewalInfo.RetryAfter.After(*cert.ari.RetryAfter) {
(cert.ari.RetryAfter == nil || storedCertData.RenewalInfo.RetryAfter.After(*cert.ari.RetryAfter)) {
return true, *storedCertData.RenewalInfo, nil
}
return false, acme.RenewalInfo{}, nil
@ -459,6 +459,9 @@ func (cfg *Config) loadStoredACMECertificateMetadata(ctx context.Context, cert C
// updated in the cache. The certificate with the updated ARI is returned. If true is
// returned, the ARI window or selected time has changed, and the caller should check if
// the cert needs to be renewed now, even if there is an error.
//
// This will always try to refresh the ARI without first checking whether it
// needs to be refreshed. Call NeedsRefresh() on the RenewalInfo first, and
// only call this if that returns true.
func (cfg *Config) updateARI(ctx context.Context, cert Certificate, logger *zap.Logger) (updatedCert Certificate, changed bool, err error) {
logger = logger.With(
zap.Strings("identifiers", cert.Names),
@ -469,6 +472,17 @@ func (cfg *Config) updateARI(ctx context.Context, cert Certificate, logger *zap.
updatedCert = cert
oldARI := cert.ari
// synchronize ARI fetching; see #297
lockName := "ari_" + cert.ari.UniqueIdentifier
if err := acquireLock(ctx, cfg.Storage, lockName); err != nil {
return cert, false, fmt.Errorf("unable to obtain ARI lock: %v", err)
}
defer func() {
if err := releaseLock(ctx, cfg.Storage, lockName); err != nil {
logger.Error("unable to release ARI lock", zap.Error(err))
}
}()
// see if the stored value has been refreshed already by another instance
gotNewARI, newARI, err := cfg.storageHasNewerARI(ctx, cert)
@ -615,11 +629,11 @@ func CleanStorage(ctx context.Context, storage Storage, opts CleanStorageOptions
opts.Logger = opts.Logger.With(zap.Any("storage", storage))
// storage cleaning should be globally exclusive
if err := storage.Lock(ctx, lockName); err != nil {
if err := acquireLock(ctx, storage, lockName); err != nil {
return fmt.Errorf("unable to acquire %s lock: %v", lockName, err)
}
defer func() {
if err := storage.Unlock(ctx, lockName); err != nil {
if err := releaseLock(ctx, storage, lockName); err != nil {
opts.Logger.Error("unable to release lock", zap.Error(err))
return
}

View File

@ -146,7 +146,7 @@ func (iss *ZeroSSLIssuer) Issue(ctx context.Context, csr *x509.CertificateReques
// create the CNAME record(s)
records := make(map[string]zoneRecord, len(cert.Validation.OtherMethods))
for name, verifyInfo := range cert.Validation.OtherMethods {
zr, err := iss.CNAMEValidation.createRecord(ctx, verifyInfo.CnameValidationP1, "CNAME", verifyInfo.CnameValidationP2)
zr, err := iss.CNAMEValidation.createRecord(ctx, verifyInfo.CnameValidationP1, "CNAME", verifyInfo.CnameValidationP2+".") // see issue #304
if err != nil {
return nil, fmt.Errorf("creating CNAME record: %v", err)
}

View File

@ -1,7 +1,7 @@
Package validator
=================
<img align="right" src="logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![Project status](https://img.shields.io/badge/version-10.22.0-green.svg)
![Project status](https://img.shields.io/badge/version-10.22.1-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)

View File

@ -1828,7 +1828,14 @@ func requireCheckFieldValue(
return int64(field.Len()) == asInt(value)
case reflect.Bool:
return field.Bool() == asBool(value)
return field.Bool() == (value == "true")
case reflect.Ptr:
if field.IsNil() {
return value == "nil"
}
// Handle non-nil pointers
return requireCheckFieldValue(fl, param, value, defaultNotFoundValue)
}
// default reflect.String:

View File

@ -1749,7 +1749,7 @@ func (r *Raft) requestPreVote(rpc RPC, req *RequestPreVoteRequest) {
}()
// Check if we have an existing leader [who's not the candidate] and also
var candidate ServerAddress
candidate := r.trans.DecodePeer(req.GetRPCHeader().Addr)
candidateID := ServerID(req.ID)
// if the Servers list is empty that mean the cluster is very likely trying to bootstrap,
@ -1805,7 +1805,6 @@ func (r *Raft) requestPreVote(rpc RPC, req *RequestPreVoteRequest) {
}
resp.Granted = true
r.setLastContact()
}
// installSnapshot is invoked when we get a InstallSnapshot RPC call.

View File

@ -16,6 +16,20 @@ This package provides various compression algorithms.
# changelog
* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
* Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
* zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
* zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
* zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
* s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
* s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
@ -81,7 +95,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
* gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@ -136,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
* zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@ -339,7 +353,7 @@ While the release has been extensively tested, it is recommended to testing when
* s2: Fix binaries.
* Feb 25, 2021 (v1.11.8)
* s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@ -518,7 +532,7 @@ While the release has been extensively tested, it is recommended to testing when
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.

View File

@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) {
}
switch d.compressionLevel.chain {
case 0:
// level was NoCompression or ConstantCompresssion.
// level was NoCompression or ConstantCompression.
d.windowEnd = 0
default:
s := d.state

View File

@ -298,6 +298,14 @@ const (
huffmanGenericReader
)
// flushMode tells decompressor when to return data
type flushMode uint8
const (
syncFlush flushMode = iota // return data after sync flush block
partialFlush // return data after each block
)
// Decompress state.
type decompressor struct {
// Input source.
@ -332,6 +340,8 @@ type decompressor struct {
nb uint
final bool
flushMode flushMode
}
func (f *decompressor) nextBlock() {
@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() {
}
if n == 0 {
f.toRead = f.dict.readFlush()
if f.flushMode == syncFlush {
f.toRead = f.dict.readFlush()
}
f.finishBlock()
return
}
@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() {
if f.dict.availRead() > 0 {
f.toRead = f.dict.readFlush()
}
f.err = io.EOF
} else if f.flushMode == partialFlush && f.dict.availRead() > 0 {
f.toRead = f.dict.readFlush()
}
f.step = nextBlock
}
@ -789,6 +806,41 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
return nil
}
// ReaderOpt configures the decompressor returned by NewReaderOpts.
type ReaderOpt func(*decompressor)

// WithPartialBlock tells decompressor to return after each block,
// so it can read data written with partial flush.
func WithPartialBlock() ReaderOpt {
	return func(f *decompressor) {
		f.flushMode = partialFlush
	}
}

// WithDict initializes the reader with a preset dictionary.
func WithDict(dict []byte) ReaderOpt {
	return func(f *decompressor) {
		f.dict.init(maxMatchOffset, dict)
	}
}
// NewReaderOpts returns a new reader configured with the provided options.
func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser {
	fixedHuffmanDecoderInit()

	d := &decompressor{
		r:        makeReader(r),
		bits:     new([maxNumLit + maxNumDist]int),
		codebits: new([numCodes]int),
		step:     nextBlock,
	}
	// Start with an empty window; an option (e.g. WithDict) may re-init it.
	d.dict.init(maxMatchOffset, nil)
	for _, o := range opts {
		o(d)
	}
	return d
}
// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement io.ByteReader,
@ -798,15 +850,7 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
f.r = makeReader(r)
f.bits = new([maxNumLit + maxNumDist]int)
f.codebits = new([numCodes]int)
f.step = nextBlock
f.dict.init(maxMatchOffset, nil)
return &f
return NewReaderOpts(r)
}
// NewReaderDict is like NewReader but initializes the reader
@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser {
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
f.r = makeReader(r)
f.bits = new([maxNumLit + maxNumDist]int)
f.codebits = new([numCodes]int)
f.step = nextBlock
f.dict.init(maxMatchOffset, dict)
return &f
return NewReaderOpts(r, WithDict(dict))
}

View File

@ -15,7 +15,7 @@ const (
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
// Use a predefined Scrach to set maximum acceptable output size.
// Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
s, err := s.prepare(b)
if err != nil {

View File

@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 0 {
fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
continue
}
// Ensure that all combinations are covered.
@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 20 {
fmt.Fprintf(w, "%d errros, stopping\n", errs)
fmt.Fprintf(w, "%d errors, stopping\n", errs)
break
}
}

View File

@ -83,11 +83,14 @@ type Writer struct {
snappy bool
flushOnWrite bool
appendIndex bool
bufferCB func([]byte)
level uint8
}
type result struct {
b []byte
// return when writing
ret []byte
// Uncompressed start offset
startOffset int64
}
@ -146,6 +149,10 @@ func (w *Writer) Reset(writer io.Writer) {
for write := range toWrite {
// Wait for the data to be available.
input := <-write
if input.ret != nil && w.bufferCB != nil {
w.bufferCB(input.ret)
input.ret = nil
}
in := input.b
if len(in) > 0 {
if w.err(nil) == nil {
@ -341,7 +348,8 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
// but the input buffer cannot be written to by the caller
// until Flush or Close has been called when concurrency != 1.
//
// If you cannot control that, use the regular Write function.
// Use the WriterBufferDone option to receive a callback when the buffer is
// done processing.
//
// Note that input is not buffered.
// This means that each write will result in discrete blocks being created.
@ -364,6 +372,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
}
if w.concurrency == 1 {
_, err := w.writeSync(buf)
if w.bufferCB != nil {
w.bufferCB(buf)
}
return err
}
@ -378,7 +389,7 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
}
}
orgBuf := buf
for len(buf) > 0 {
// Cut input.
uncompressed := buf
@ -397,6 +408,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
startOffset: w.uncompWritten,
}
w.uncompWritten += int64(len(uncompressed))
if len(buf) == 0 && w.bufferCB != nil {
res.ret = orgBuf
}
go func() {
race.ReadSlice(uncompressed)
@ -922,7 +936,7 @@ func WriterBetterCompression() WriterOption {
}
// WriterBestCompression will enable better compression.
// EncodeBetter compresses better than Encode but typically with a
// EncodeBest compresses better than Encode but typically with a
// big speed decrease on compression.
func WriterBestCompression() WriterOption {
return func(w *Writer) error {
@ -941,6 +955,17 @@ func WriterUncompressed() WriterOption {
}
}
// WriterBufferDone will perform a callback when EncodeBuffer has finished
// writing a buffer to the output and the buffer can safely be reused.
// If the buffer was split into several blocks, the callback is invoked after
// the last block. Callbacks will not be done concurrently.
func WriterBufferDone(fn func(b []byte)) WriterOption {
	return func(w *Writer) error {
		w.bufferCB = fn
		return nil
	}
}
// WriterBlockSize allows to override the default block size.
// Blocks will be this size or smaller.
// Minimum size is 4KB and maximum size is 4MB.

View File

@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
printf("RLE set to 0x%x, code: %v", symb, v)
}
case compModeFSE:
println("Reading table for", tableIndex(i))
if debugDecoder {
println("Reading table for", tableIndex(i))
}
if seq.fse == nil || seq.fse.preDefined {
seq.fse = fseDecoderPool.Get().(*fseDecoder)
}

View File

@ -179,9 +179,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
seq.matchLen = uint32(lenght - zstdMinMatch)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@ -210,12 +210,12 @@ encodeLoop:
// Index match start+1 (long) -> s - 1
index0 := s + repOff
s += lenght + repOff
s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, lenght)
println("repeat ended", s, length)
}
break encodeLoop
@ -241,9 +241,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
seq.matchLen = uint32(lenght - zstdMinMatch)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@ -270,11 +270,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
s += lenght + repOff2
s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, lenght)
println("repeat ended", s, length)
}
break encodeLoop
@ -708,9 +708,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
seq.matchLen = uint32(lenght - zstdMinMatch)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@ -738,12 +738,12 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
s += lenght + repOff
s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, lenght)
println("repeat ended", s, length)
}
break encodeLoop
@ -772,9 +772,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
seq.matchLen = uint32(lenght - zstdMinMatch)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@ -801,11 +801,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
s += lenght + repOff2
s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, lenght)
println("repeat ended", s, length)
}
break encodeLoop

View File

@ -138,9 +138,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
seq.matchLen = uint32(lenght - zstdMinMatch)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@ -166,11 +166,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
s += lenght + repOff
s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, lenght)
println("repeat ended", s, length)
}
break encodeLoop
@ -798,9 +798,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
seq.matchLen = uint32(lenght - zstdMinMatch)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@ -826,11 +826,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
s += lenght + repOff
s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, lenght)
println("repeat ended", s, length)
}
break encodeLoop

View File

@ -202,7 +202,7 @@ func (e *Encoder) nextBlock(final bool) error {
return nil
}
if final && len(s.filling) > 0 {
s.current = e.EncodeAll(s.filling, s.current[:0])
s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
var n2 int
n2, s.err = s.w.Write(s.current)
if s.err != nil {
@ -469,6 +469,15 @@ func (e *Encoder) Close() error {
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
e.init.Do(e.initialize)
enc := <-e.encoders
defer func() {
e.encoders <- enc
}()
return e.encodeAll(enc, src, dst)
}
func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
if len(src) == 0 {
if e.o.fullZero {
// Add frame header.
@ -491,13 +500,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
return dst
}
e.init.Do(e.initialize)
enc := <-e.encoders
defer func() {
// Release encoder reference to last block.
// If a non-single block is needed the encoder will reset again.
e.encoders <- enc
}()
// Use single segments when above minimum window and below window size.
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {

View File

@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
}
return err
}
printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
if debugDecoder {
printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
}
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)

View File

@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
default:
return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
}
s.seqSize += ctx.litRemain
@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
return io.ErrUnexpectedEOF
}
return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
}
if ctx.litRemain < 0 {

Some files were not shown because too many files have changed in this diff Show More