Update dependencies

This commit is contained in:
Ingo Oppermann 2023-07-03 21:31:46 +02:00
parent c5d52dbeb3
commit e71a352521
No known key found for this signature in database
GPG Key ID: 2AB32426E9DD229E
72 changed files with 2174 additions and 866 deletions

20
go.mod
View File

@ -3,11 +3,11 @@ module github.com/datarhei/core/v16
go 1.18
require (
github.com/99designs/gqlgen v0.17.33
github.com/99designs/gqlgen v0.17.34
github.com/Masterminds/semver/v3 v3.2.1
github.com/adhocore/gronx v1.6.3
github.com/atrox/haikunatorgo/v2 v2.0.1
github.com/caddyserver/certmagic v0.18.0
github.com/caddyserver/certmagic v0.18.2
github.com/casbin/casbin/v2 v2.71.1
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230627120001-16d06aa77802
github.com/datarhei/gosrt v0.5.2
@ -28,14 +28,14 @@ require (
github.com/lestrrat-go/strftime v1.0.6
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/mattn/go-isatty v0.0.19
github.com/minio/minio-go/v7 v7.0.57
github.com/minio/minio-go/v7 v7.0.59
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
github.com/prometheus/client_golang v1.16.0
github.com/shirou/gopsutil/v3 v3.23.5
github.com/shirou/gopsutil/v3 v3.23.6
github.com/stretchr/testify v1.8.4
github.com/swaggo/echo-swagger v1.4.0
github.com/swaggo/swag v1.16.1
github.com/vektah/gqlparser/v2 v2.5.3
github.com/vektah/gqlparser/v2 v2.5.6
github.com/xeipuuv/gojsonschema v1.2.0
go.etcd.io/bbolt v1.3.7
go.uber.org/automaxprocs v1.5.2
@ -73,11 +73,11 @@ require (
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.3 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.4 // indirect
github.com/iancoleman/orderedmap v0.2.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.6 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/labstack/gommon v0.4.0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/libdns/libdns v0.2.1 // indirect
@ -85,8 +85,8 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mholt/acmez v1.1.1 // indirect
github.com/miekg/dns v1.1.54 // indirect
github.com/mholt/acmez v1.2.0 // indirect
github.com/miekg/dns v1.1.55 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
@ -120,7 +120,7 @@ require (
golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.10.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

41
go.sum
View File

@ -1,5 +1,5 @@
github.com/99designs/gqlgen v0.17.33 h1:VTUpAtElDszatPSe26N0SD0deJCSxb7TZLlUb6JnVRY=
github.com/99designs/gqlgen v0.17.33/go.mod h1:ygDK+m8zGpoQuSh8xoq80UfisR5JTZr7mN57qXlSIZs=
github.com/99designs/gqlgen v0.17.34 h1:5cS5/OKFguQt+Ws56uj9FlG2xm1IlcJWNF2jrMIKYFQ=
github.com/99designs/gqlgen v0.17.34/go.mod h1:Axcd3jIFHBVcqzixujJQr1wGqE+lGTpz6u4iZBZg1G8=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw=
@ -34,8 +34,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/caddyserver/certmagic v0.18.0 h1:L22mJES1WllfLoHUcQUy4wVO7UfOsoL5wtg/Bj7kmIw=
github.com/caddyserver/certmagic v0.18.0/go.mod h1:e0YLTnXIopZ05bBWCLzpIf1Yvk27Q90FGUmGowFRDY8=
github.com/caddyserver/certmagic v0.18.2 h1:Nj2+M+A2Ho9IF6n1wUSbra4mX1X6ALzWpul9HooprHA=
github.com/caddyserver/certmagic v0.18.2/go.mod h1:cLsgYXecH1iVUPjDXw15/1SKjZk/TK+aFfQk5FnugGQ=
github.com/casbin/casbin/v2 v2.71.1 h1:LRHyqM0S1LzM/K59PmfUIN0ZJfLgcOjL4OhOQI/FNXU=
github.com/casbin/casbin/v2 v2.71.1/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@ -138,8 +138,8 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.3 h1:kmRrRLlInXvng0SmLxmQpQkpbYAvcXm7NPDrgxJa9mE=
github.com/hashicorp/golang-lru/v2 v2.0.3/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/golang-lru/v2 v2.0.4 h1:7GHuZcgid37q8o5i3QI9KMT4nCWQQ3Kx3Ov6bb9MfK0=
github.com/hashicorp/golang-lru/v2 v2.0.4/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
github.com/hashicorp/raft v1.5.0 h1:uNs9EfJ4FwiArZRxxfd/dQ5d33nV31/CdCHArH89hT8=
github.com/hashicorp/raft v1.5.0/go.mod h1:pKHB2mf/Y25u3AHNSXVRv+yT+WAnmeTX0BwVppVQV+M=
@ -161,8 +161,8 @@ github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@ -210,14 +210,14 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mholt/acmez v1.1.1 h1:sYeeYd/EHVm9cSmLdWey5oW/fXFVAq5pNLjSczN2ZUg=
github.com/mholt/acmez v1.1.1/go.mod h1:VT9YwH1xgNX1kmYY89gY8xPJC84BFAisjo8Egigt4kE=
github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/mholt/acmez v1.2.0 h1:1hhLxSgY5FvH5HCnGUuwbKY2VQVo8IU7rxXKSnZ7F30=
github.com/mholt/acmez v1.2.0/go.mod h1:VT9YwH1xgNX1kmYY89gY8xPJC84BFAisjo8Egigt4kE=
github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.57 h1:xsFiOiWjpC1XAGbFEUOzj1/gMXGz7ljfxifwcb/5YXU=
github.com/minio/minio-go/v7 v7.0.57/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
github.com/minio/minio-go/v7 v7.0.59 h1:lxIXwsTIcQkYoEG25rUJbzpmSB/oWeVDmxFo/uWUUsw=
github.com/minio/minio-go/v7 v7.0.59/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@ -275,8 +275,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/shirou/gopsutil/v3 v3.23.5 h1:5SgDCeQ0KW0S4N0znjeM/eFHXXOKyv2dVNgRq/c9P6Y=
github.com/shirou/gopsutil/v3 v3.23.5/go.mod h1:Ng3Maa27Q2KARVJ0SPZF5NdrQSC3XHKP8IIWrHgMeLY=
github.com/shirou/gopsutil/v3 v3.23.6 h1:5y46WPI9QBKBbK7EEccUPNXpJpNrvPuTD0O2zHEHT08=
github.com/shirou/gopsutil/v3 v3.23.6/go.mod h1:j7QX50DrXYggrpN30W0Mo+I4/8U2UUIQrnrhqUeWrAU=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
@ -300,7 +300,6 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/swaggo/echo-swagger v1.4.0 h1:RCxLKySw1SceHLqnmc41pKyiIeE+OiD7NSI7FUOBlLo=
@ -322,8 +321,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vektah/gqlparser/v2 v2.5.3 h1:goUwv4+blhtwR3GwefadPVI4ubYc/WZSypljWMQa6IE=
github.com/vektah/gqlparser/v2 v2.5.3/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME=
github.com/vektah/gqlparser/v2 v2.5.6 h1:Ou14T0N1s191eRMZ1gARVqohcbe1e8FrcONScsq8cRU=
github.com/vektah/gqlparser/v2 v2.5.6/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@ -420,8 +419,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -5,10 +5,10 @@ import (
"fmt"
"go/token"
"go/types"
"strings"
"golang.org/x/tools/go/packages"
"github.com/99designs/gqlgen/codegen/templates"
"github.com/99designs/gqlgen/internal/code"
"github.com/vektah/gqlparser/v2/ast"
)
@ -285,7 +285,7 @@ func (ref *TypeReference) UniquenessKey() string {
// Fix for #896
elemNullability = "ᚄ"
}
return nullability + ref.Definition.Name + "2" + templates.TypeIdentifier(ref.GO) + elemNullability
return nullability + ref.Definition.Name + "2" + TypeIdentifier(ref.GO) + elemNullability
}
func (ref *TypeReference) MarshalFunc() string {
@ -540,3 +540,41 @@ func basicUnderlying(it types.Type) *types.Basic {
return nil
}
var pkgReplacer = strings.NewReplacer(
"/", "ᚋ",
".", "ᚗ",
"-", "ᚑ",
"~", "א",
)
func TypeIdentifier(t types.Type) string {
res := ""
for {
switch it := t.(type) {
case *types.Pointer:
t.Underlying()
res += "ᚖ"
t = it.Elem()
case *types.Slice:
res += "ᚕ"
t = it.Elem()
case *types.Named:
res += pkgReplacer.Replace(it.Obj().Pkg().Path())
res += "ᚐ"
res += it.Obj().Name()
return res
case *types.Basic:
res += it.Name()
return res
case *types.Map:
res += "map"
return res
case *types.Interface:
res += "interface"
return res
default:
panic(fmt.Errorf("unexpected type %T", it))
}
}
}

View File

@ -26,8 +26,10 @@ type Config struct {
Models TypeMap `yaml:"models,omitempty"`
StructTag string `yaml:"struct_tag,omitempty"`
Directives map[string]DirectiveConfig `yaml:"directives,omitempty"`
GoInitialisms GoInitialismsConfig `yaml:"go_initialisms,omitempty"`
OmitSliceElementPointers bool `yaml:"omit_slice_element_pointers,omitempty"`
OmitGetters bool `yaml:"omit_getters,omitempty"`
OmitInterfaceChecks bool `yaml:"omit_interface_checks,omitempty"`
OmitComplexity bool `yaml:"omit_complexity,omitempty"`
OmitGQLGenFileNotice bool `yaml:"omit_gqlgen_file_notice,omitempty"`
OmitGQLGenVersionInFileNotice bool `yaml:"omit_gqlgen_version_in_file_notice,omitempty"`
@ -201,6 +203,9 @@ func CompleteConfig(config *Config) error {
config.Sources = append(config.Sources, &ast.Source{Name: filename, Input: string(schemaRaw)})
}
config.GoInitialisms.setInitialisms()
return nil
}
@ -305,8 +310,9 @@ func (c *Config) injectTypesFromSchema() error {
if c.Models[schemaType.Name].Fields == nil {
c.Models[schemaType.Name] = TypeMapEntry{
Model: c.Models[schemaType.Name].Model,
Fields: map[string]TypeMapField{},
Model: c.Models[schemaType.Name].Model,
ExtraFields: c.Models[schemaType.Name].ExtraFields,
Fields: map[string]TypeMapField{},
}
}

View File

@ -0,0 +1,94 @@
package config
import "strings"
// commonInitialisms is a set of common initialisms.
// Only add entries that are highly unlikely to be non-initialisms.
// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
var commonInitialisms = map[string]bool{
"ACL": true,
"API": true,
"ASCII": true,
"CPU": true,
"CSS": true,
"CSV": true,
"DNS": true,
"EOF": true,
"GUID": true,
"HTML": true,
"HTTP": true,
"HTTPS": true,
"ICMP": true,
"ID": true,
"IP": true,
"JSON": true,
"KVK": true,
"LHS": true,
"PDF": true,
"PGP": true,
"QPS": true,
"QR": true,
"RAM": true,
"RHS": true,
"RPC": true,
"SLA": true,
"SMTP": true,
"SQL": true,
"SSH": true,
"SVG": true,
"TCP": true,
"TLS": true,
"TTL": true,
"UDP": true,
"UI": true,
"UID": true,
"URI": true,
"URL": true,
"UTF8": true,
"UUID": true,
"VM": true,
"XML": true,
"XMPP": true,
"XSRF": true,
"XSS": true,
}
// GetInitialisms returns the initialisms to capitalize in Go names.
// If unchanged, the default initialisms (commonInitialisms) are returned.
// It is a package-level variable so setInitialisms can swap in a
// user-configured set at config-load time.
var GetInitialisms = func() map[string]bool {
return commonInitialisms
}
// GoInitialismsConfig allows modifying the default behavior of naming Go methods, types and properties.
type GoInitialismsConfig struct {
// If true, the Initialisms won't get appended to the default ones but replace them
ReplaceDefaults bool `yaml:"replace_defaults"`
// Custom initialisms to be added or to replace the default ones
Initialisms []string `yaml:"initialisms"`
}
// setInitialisms adjusts GetInitialisms based on its settings.
func (i GoInitialismsConfig) setInitialisms() {
toUse := i.determineGoInitialisms()
GetInitialisms = func() map[string]bool {
return toUse
}
}
// determineGoInitialisms returns the Go initialisms to be used, based on its settings.
// All keys are upper-cased, since callers look words up in upper case.
func (i GoInitialismsConfig) determineGoInitialisms() (initialismsToUse map[string]bool) {
if i.ReplaceDefaults {
// Replace mode: only the user's initialisms are used.
initialismsToUse = make(map[string]bool, len(i.Initialisms))
for _, initialism := range i.Initialisms {
initialismsToUse[strings.ToUpper(initialism)] = true
}
} else {
// Append mode: start from the defaults, then layer the user's on top.
initialismsToUse = make(map[string]bool, len(commonInitialisms)+len(i.Initialisms))
for initialism, value := range commonInitialisms {
initialismsToUse[strings.ToUpper(initialism)] = value
}
for _, initialism := range i.Initialisms {
initialismsToUse[strings.ToUpper(initialism)] = true
}
}
return initialismsToUse
}

View File

@ -17,8 +17,8 @@ import (
"text/template"
"unicode"
"github.com/99designs/gqlgen/codegen/config"
"github.com/99designs/gqlgen/internal/code"
"github.com/99designs/gqlgen/internal/imports"
)
@ -202,7 +202,7 @@ func Funcs() template.FuncMap {
"rawQuote": rawQuote,
"dump": Dump,
"ref": ref,
"ts": TypeIdentifier,
"ts": config.TypeIdentifier,
"call": Call,
"prefixLines": prefixLines,
"notNil": notNil,
@ -248,44 +248,6 @@ func ref(p types.Type) string {
return CurrentImports.LookupType(p)
}
// pkgReplacer maps characters that are legal in a Go import path but
// illegal in a Go identifier to distinctive Unicode runes.
var pkgReplacer = strings.NewReplacer(
"/", "ᚋ",
".", "ᚗ",
"-", "ᚑ",
"~", "א",
)
// TypeIdentifier encodes a Go type as a single identifier-safe string:
// it unwraps pointer ("ᚖ") and slice ("ᚕ") layers, then terminates on a
// named, basic, map, or interface type; any other kind of type panics.
func TypeIdentifier(t types.Type) string {
res := ""
for {
switch it := t.(type) {
case *types.Pointer:
// The result of t.Underlying() is discarded; this call has no effect.
t.Underlying()
res += "ᚖ"
t = it.Elem()
case *types.Slice:
res += "ᚕ"
t = it.Elem()
case *types.Named:
// Named type ends the walk: escaped package path, separator, type name.
res += pkgReplacer.Replace(it.Obj().Pkg().Path())
res += "ᚐ"
res += it.Obj().Name()
return res
case *types.Basic:
res += it.Name()
return res
case *types.Map:
res += "map"
return res
case *types.Interface:
res += "interface"
return res
default:
panic(fmt.Errorf("unexpected type %T", it))
}
}
}
func Call(p *types.Func) string {
pkg := CurrentImports.Lookup(p.Pkg().Path())
@ -503,14 +465,15 @@ func wordWalker(str string, f func(*wordInfo)) {
}
i++
initialisms := config.GetInitialisms()
// [w,i) is a word.
word := string(runes[w:i])
if !eow && commonInitialisms[word] && !unicode.IsLower(runes[i]) {
if !eow && initialisms[word] && !unicode.IsLower(runes[i]) {
// through
// split IDFoo → ID, Foo
// but URLs → URLs
} else if !eow {
if commonInitialisms[word] {
if initialisms[word] {
hasCommonInitial = true
}
continue
@ -518,7 +481,7 @@ func wordWalker(str string, f func(*wordInfo)) {
matchCommonInitial := false
upperWord := strings.ToUpper(word)
if commonInitialisms[upperWord] {
if initialisms[upperWord] {
// If the uppercase word (string(runes[w:i]) is "ID" or "IP"
// AND
// the word is the first two characters of the str
@ -591,57 +554,6 @@ func sanitizeKeywords(name string) string {
return name
}
// commonInitialisms is a set of common initialisms.
// Only add entries that are highly unlikely to be non-initialisms.
// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
var commonInitialisms = map[string]bool{
"ACL": true,
"API": true,
"ASCII": true,
"CPU": true,
"CSS": true,
"CSV": true,
"DNS": true,
"EOF": true,
"GUID": true,
"HTML": true,
"HTTP": true,
"HTTPS": true,
"ICMP": true,
"ID": true,
"IP": true,
"JSON": true,
"KVK": true,
"LHS": true,
"PDF": true,
"PGP": true,
"QPS": true,
"QR": true,
"RAM": true,
"RHS": true,
"RPC": true,
"SLA": true,
"SMTP": true,
"SQL": true,
"SSH": true,
"SVG": true,
"TCP": true,
"TLS": true,
"TTL": true,
"UDP": true,
"UI": true,
"UID": true,
"URI": true,
"URL": true,
"UTF8": true,
"UUID": true,
"VM": true,
"XML": true,
"XMPP": true,
"XSRF": true,
"XSS": true,
}
func rawQuote(s string) string {
return "`" + strings.ReplaceAll(s, "`", "`+\"`\"+`") + "`"
}

View File

@ -73,29 +73,26 @@ func (t SSE) Do(w http.ResponseWriter, r *http.Request, exec graphql.GraphExecut
return
}
rc, OpErr := exec.CreateOperationContext(ctx, params)
if OpErr != nil {
w.WriteHeader(statusFor(OpErr))
resp := exec.DispatchError(graphql.WithOperationContext(ctx, rc), OpErr)
writeJson(w, resp)
return
}
rc, opErr := exec.CreateOperationContext(ctx, params)
ctx = graphql.WithOperationContext(ctx, rc)
w.Header().Set("Content-Type", "text/event-stream")
fmt.Fprint(w, ":\n\n")
flusher.Flush()
responses, ctx := exec.DispatchOperation(ctx, rc)
for {
response := responses(ctx)
if response == nil {
break
if opErr != nil {
resp := exec.DispatchError(ctx, opErr)
writeJsonWithSSE(w, resp)
} else {
responses, ctx := exec.DispatchOperation(ctx, rc)
for {
response := responses(ctx)
if response == nil {
break
}
writeJsonWithSSE(w, response)
flusher.Flush()
}
writeJsonWithSSE(w, response)
flusher.Flush()
}
fmt.Fprint(w, "event: complete\n\n")

View File

@ -74,13 +74,15 @@ func (f *Field) IsDeprecated() bool {
}
func (f *Field) DeprecationReason() *string {
if f.deprecation == nil {
if f.deprecation == nil || !f.IsDeprecated() {
return nil
}
reason := f.deprecation.Arguments.ForName("reason")
if reason == nil {
return nil
defaultReason := "No longer supported"
return &defaultReason
}
return &reason.Value.Raw

View File

@ -24,7 +24,7 @@ var apolloSandboxPage = template.Must(template.New("ApolloSandbox").Parse(`<!doc
<body>
<div style="width: 100vw; height: 100vh;" id='embedded-sandbox'></div>
<!-- NOTE: New version available at https://embeddable-sandbox.cdn.apollographql.com/ -->
<script rel="preload" as="script" crossorigin="anonymous" integrity="{{.mainSRI}}" type="text/javascript" src="https://embeddable-sandbox.cdn.apollographql.com/58165cf7452dbad480c7cb85e7acba085b3bac1d/embeddable-sandbox.umd.production.min.js"></script>
<script rel="preload" as="script" crossorigin="anonymous" integrity="{{.mainSRI}}" type="text/javascript" src="https://embeddable-sandbox.cdn.apollographql.com/7212121cad97028b007e974956dc951ce89d683c/embeddable-sandbox.umd.production.min.js"></script>
<script>
{{- if .endpointIsAbsolute}}
const url = {{.endpoint}};
@ -53,7 +53,7 @@ func ApolloSandboxHandler(title, endpoint string) http.HandlerFunc {
"title": title,
"endpoint": endpoint,
"endpointIsAbsolute": endpointHasScheme(endpoint),
"mainSRI": "sha256-/E4VNgAWFmbNLyXACSYoqsDAj68jC1sCMSQ0cDjf4YM=",
"mainSRI": "sha256-/ldbSJ7EovavF815TfCN50qKB9AMvzskb9xiG71bmg2I=",
})
if err != nil {
panic(err)

View File

@ -1,3 +1,3 @@
package graphql
const Version = "v0.17.33"
const Version = "v0.17.34"

View File

@ -32,6 +32,9 @@ resolver:
# Optional: turn on to use []Thing instead of []*Thing
# omit_slice_element_pointers: false
# Optional: turn on to omit Is<Name>() methods for interfaces and unions
# omit_interface_checks: true
# Optional: turn on to skip generation of ComplexityRoot struct content and Complexity function
# omit_complexity: false

View File

@ -50,6 +50,7 @@ type Interface struct {
Name string
Fields []*Field
Implements []string
OmitCheck bool
}
type Object struct {
@ -124,6 +125,7 @@ func (m *Plugin) MutateConfig(cfg *config.Config) error {
Name: schemaType.Name,
Implements: schemaType.Interfaces,
Fields: fields,
OmitCheck: cfg.OmitInterfaceChecks,
}
b.Interfaces = append(b.Interfaces, it)

View File

@ -15,10 +15,12 @@
{{- range $model := .Interfaces }}
{{ with .Description }} {{.|prefixLines "// "}} {{ end }}
type {{ goModelName .Name }} interface {
{{- range $impl := .Implements }}
Is{{ goModelName $impl }}()
{{- if not .OmitCheck }}
{{- range $impl := .Implements }}
Is{{ goModelName $impl }}()
{{- end }}
Is{{ goModelName .Name }}()
{{- end }}
Is{{ goModelName .Name }}()
{{- range $field := .Fields }}
{{- with .Description }}
{{.|prefixLines "// "}}

View File

@ -75,10 +75,11 @@ CertMagic - Automatic HTTPS using Let's Encrypt
## Features
- Fully automated certificate management including issuance and renewal
- One-liner, fully managed HTTPS servers
- One-line, fully managed HTTPS servers
- Full control over almost every aspect of the system
- HTTP->HTTPS redirects
- Solves all 3 ACME challenges: HTTP, TLS-ALPN, and DNS
- Multiple issuers supported: get certificates from multiple sources/CAs for redundancy and resiliency
- Solves all 3 common ACME challenges: HTTP, TLS-ALPN, and DNS (and capable of others)
- Most robust error handling of _any_ ACME client
- Challenges are randomized to avoid accidental dependence
- Challenges are rotated to overcome certain network blockages
@ -88,7 +89,8 @@ CertMagic - Automatic HTTPS using Let's Encrypt
- Written in Go, a language with memory-safety guarantees
- Powered by [ACMEz](https://github.com/mholt/acmez), _the_ premier ACME client library for Go
- All [libdns](https://github.com/libdns) DNS providers work out-of-the-box
- Pluggable storage implementations (default: file system)
- Pluggable storage backends (default: file system)
- Pluggable key sources
- Wildcard certificates
- Automatic OCSP stapling ([done right](https://gist.github.com/sleevi/5efe9ef98961ecfb4da8#gistcomment-2336055)) [keeps your sites online!](https://twitter.com/caddyserver/status/1234874273724084226)
- Will [automatically attempt](https://twitter.com/mholt6/status/1235577699541762048) to replace [revoked certificates](https://community.letsencrypt.org/t/2020-02-29-caa-rechecking-bug/114591/3?u=mholt)!
@ -101,7 +103,8 @@ CertMagic - Automatic HTTPS using Let's Encrypt
- Caddy / CertMagic pioneered this technology
- Custom decision functions to regulate and throttle on-demand behavior
- Optional event hooks for observation
- Works with any certificate authority (CA) compliant with the ACME specification
- One-time private keys by default (new key for each cert) to discourage pinning and reduce scope of key compromise
- Works with any certificate authority (CA) compliant with the ACME specification RFC 8555
- Certificate revocation (please, only if private key is compromised)
- Must-Staple (optional; not default)
- Cross-platform support! Mac, Windows, Linux, BSD, Android...
@ -478,13 +481,15 @@ CertMagic emits events when possible things of interest happen. Set the [`OnEven
- `identifier`: The name on the certificate
- `remaining`: Time left on the certificate (if renewal)
- `issuer`: The previous or current issuer
- `storage_key`: The path to the cert resources within storage
- `storage_path`: The path to the folder containing the cert resources within storage
- `private_key_path`: The path to the private key file in storage
- `certificate_path`: The path to the public key file in storage
- `metadata_path`: The path to the metadata file in storage
- **`cert_failed`** An attempt to obtain a certificate failed
- `renewal`: Whether this is a renewal
- `identifier`: The name on the certificate
- `remaining`: Time left on the certificate (if renewal)
- `issuer`: The previous or current issuer
- `storage_key`: The path to the cert resources within storage
- `issuers`: The issuer(s) tried
- `error`: The (final) error message
- **`tls_get_certificate`** The GetCertificate phase of a TLS handshake is under way
- `client_hello`: The tls.ClientHelloInfo struct

View File

@ -411,6 +411,23 @@ type KeyGenerator interface {
GenerateKey() (crypto.PrivateKey, error)
}
// IssuerPolicy is a type that enumerates how to
// choose which issuer to use. EXPERIMENTAL and
// subject to change.
type IssuerPolicy string
// Supported issuer policies. These are subject to change.
const (
// UseFirstIssuer uses the first issuer that
// successfully returns a certificate.
UseFirstIssuer = "first"
// UseFirstRandomIssuer shuffles the list of
// configured issuers, then uses the first one
// that successfully returns a certificate.
UseFirstRandomIssuer = "first_random"
)
// IssuedCertificate represents a certificate that was just issued.
type IssuedCertificate struct {
// The PEM-encoding of DER-encoded ASN.1 data.

View File

@ -95,6 +95,17 @@ type Config struct {
// turn until one succeeds.
Issuers []Issuer
// How to select which issuer to use.
// Default: UseFirstIssuer (subject to change).
IssuerPolicy IssuerPolicy
// If true, private keys already existing in storage
// will be reused. Otherwise, a new key will be
// created for every new certificate to mitigate
// pinning and reduce the scope of key compromise.
// Default: false (do not reuse keys).
ReusePrivateKeys bool
// The source of new private keys for certificates;
// the default KeySource is StandardKeyGenerator.
KeySource KeyGenerator
@ -526,10 +537,25 @@ func (cfg *Config) obtainCert(ctx context.Context, name string, interactive bool
return fmt.Errorf("obtaining certificate aborted by event handler: %w", err)
}
// if storage has a private key already, use it; otherwise we'll generate our own
privKey, privKeyPEM, issuers, err := cfg.reusePrivateKey(ctx, name)
if err != nil {
return err
// If storage has a private key already, use it; otherwise we'll generate our own.
// Also create the slice of issuers we will try using according to any issuer
// selection policy (it must be a copy of the slice so we don't mutate original).
var privKey crypto.PrivateKey
var privKeyPEM []byte
var issuers []Issuer
if cfg.ReusePrivateKeys {
privKey, privKeyPEM, issuers, err = cfg.reusePrivateKey(ctx, name)
if err != nil {
return err
}
} else {
issuers = make([]Issuer, len(cfg.Issuers))
copy(issuers, cfg.Issuers)
}
if cfg.IssuerPolicy == UseFirstRandomIssuer {
weakrand.Shuffle(len(issuers), func(i, j int) {
issuers[i], issuers[j] = issuers[j], issuers[i]
})
}
if privKey == nil {
privKey, err = cfg.KeySource.GenerateKey()
@ -593,6 +619,7 @@ func (cfg *Config) obtainCert(ctx context.Context, name string, interactive bool
// only the error from the last issuer will be returned, but we logged the others
return fmt.Errorf("[%s] Obtain: %w", name, err)
}
issuerKey := issuerUsed.IssuerKey()
// success - immediately save the certificate resource
certRes := CertificateResource{
@ -609,11 +636,16 @@ func (cfg *Config) obtainCert(ctx context.Context, name string, interactive bool
log.Info("certificate obtained successfully", zap.String("identifier", name))
certKey := certRes.NamesKey()
cfg.emit(ctx, "cert_obtained", map[string]any{
"renewal": false,
"identifier": name,
"issuers": issuerUsed.IssuerKey(),
"storage_key": certRes.NamesKey(),
"renewal": false,
"identifier": name,
"issuer": issuerUsed.IssuerKey(),
"storage_path": StorageKeys.CertsSitePrefix(issuerKey, certKey),
"private_key_path": StorageKeys.SitePrivateKey(issuerKey, certKey),
"certificate_path": StorageKeys.SiteCert(issuerKey, certKey),
"metadata_path": StorageKeys.SiteMeta(issuerKey, certKey),
})
return nil
@ -683,9 +715,6 @@ func (cfg *Config) storageHasCertResourcesAnyIssuer(ctx context.Context, name st
// and its assets in storage if successful. It DOES NOT update the in-memory
// cache with the new certificate. The certificate will not be renewed if it
// is not close to expiring unless force is true.
//
// Renewing a certificate is the same as obtaining a certificate, except that
// the existing private key already in storage is reused.
func (cfg *Config) RenewCertSync(ctx context.Context, name string, force bool) error {
return cfg.renewCert(ctx, name, force, true)
}
@ -766,10 +795,25 @@ func (cfg *Config) renewCert(ctx context.Context, name string, force, interactiv
return fmt.Errorf("renewing certificate aborted by event handler: %w", err)
}
privateKey, err := PEMDecodePrivateKey(certRes.PrivateKeyPEM)
// reuse or generate new private key for CSR
var privateKey crypto.PrivateKey
if cfg.ReusePrivateKeys {
privateKey, err = PEMDecodePrivateKey(certRes.PrivateKeyPEM)
} else {
privateKey, err = cfg.KeySource.GenerateKey()
}
if err != nil {
return err
}
// if we generated a new key, make sure to replace its PEM encoding too!
if !cfg.ReusePrivateKeys {
certRes.PrivateKeyPEM, err = PEMEncodePrivateKey(privateKey)
if err != nil {
return err
}
}
csr, err := cfg.generateCSR(privateKey, []string{name})
if err != nil {
return err
@ -808,17 +852,17 @@ func (cfg *Config) renewCert(ctx context.Context, name string, force, interactiv
}
if err != nil {
cfg.emit(ctx, "cert_failed", map[string]any{
"renewal": true,
"identifier": name,
"remaining": timeLeft,
"issuers": issuerKeys,
"storage_key": certRes.NamesKey(),
"error": err,
"renewal": true,
"identifier": name,
"remaining": timeLeft,
"issuers": issuerKeys,
"error": err,
})
// only the error from the last issuer will be returned, but we logged the others
return fmt.Errorf("[%s] Renew: %w", name, err)
}
issuerKey := issuerUsed.IssuerKey()
// success - immediately save the renewed certificate resource
newCertRes := CertificateResource{
@ -826,7 +870,7 @@ func (cfg *Config) renewCert(ctx context.Context, name string, force, interactiv
CertificatePEM: issuedCert.Certificate,
PrivateKeyPEM: certRes.PrivateKeyPEM,
IssuerData: issuedCert.Metadata,
issuerKey: issuerUsed.IssuerKey(),
issuerKey: issuerKey,
}
err = cfg.saveCertResource(ctx, issuerUsed, newCertRes)
if err != nil {
@ -835,12 +879,17 @@ func (cfg *Config) renewCert(ctx context.Context, name string, force, interactiv
log.Info("certificate renewed successfully", zap.String("identifier", name))
certKey := newCertRes.NamesKey()
cfg.emit(ctx, "cert_obtained", map[string]any{
"renewal": true,
"remaining": timeLeft,
"identifier": name,
"issuer": issuerUsed.IssuerKey(),
"storage_key": certRes.NamesKey(),
"renewal": true,
"remaining": timeLeft,
"identifier": name,
"issuer": issuerKey,
"storage_path": StorageKeys.CertsSitePrefix(issuerKey, certKey),
"private_key_path": StorageKeys.SitePrivateKey(issuerKey, certKey),
"certificate_path": StorageKeys.SiteCert(issuerKey, certKey),
"metadata_path": StorageKeys.SiteMeta(issuerKey, certKey),
})
return nil

View File

@ -154,6 +154,11 @@ func (s *FileStorage) Filename(key string) string {
func (s *FileStorage) Lock(ctx context.Context, name string) error {
filename := s.lockFilename(name)
// sometimes the lockfiles read as empty (size 0) - this is either a stale lock or it
// is currently being written; we can retry a few times in this case, as it has been
// shown to help (issue #232)
var emptyCount int
for {
err := createLockfile(filename)
if err == nil {
@ -173,11 +178,23 @@ func (s *FileStorage) Lock(ctx context.Context, name string) error {
err2 := json.NewDecoder(f).Decode(&meta)
f.Close()
if errors.Is(err2, io.EOF) {
// lockfile is empty or truncated; I *think* we can assume the previous
// acquirer either crashed or had some sort of failure that caused them
// to be unable to fully acquire or retain the lock, therefore we should
// treat it as if the lockfile did not exist
log.Printf("[INFO][%s] %s: Empty lockfile (%v) - likely previous process crashed or storage medium failure; treating as stale", s, filename, err2)
emptyCount++
if emptyCount < 8 {
// wait for brief time and retry; could be that the file is in the process
// of being written or updated (which involves truncating) - see issue #232
select {
case <-time.After(250 * time.Millisecond):
case <-ctx.Done():
return ctx.Err()
}
continue
} else {
// lockfile is empty or truncated multiple times; I *think* we can assume
// the previous acquirer either crashed or had some sort of failure that
// caused them to be unable to fully acquire or retain the lock, therefore
// we should treat it as if the lockfile did not exist
log.Printf("[INFO][%s] %s: Empty lockfile (%v) - likely previous process crashed or storage medium failure; treating as stale", s, filename, err2)
}
} else if err2 != nil {
return fmt.Errorf("decoding lockfile contents: %w", err2)
}
@ -311,6 +328,8 @@ func updateLockfileFreshness(filename string) (bool, error) {
}
var meta lockMeta
if err := json.Unmarshal(metaBytes, &meta); err != nil {
// see issue #232: this can error if the file is empty,
// which happens sometimes when the disk is REALLY slow
return true, err
}

View File

@ -49,6 +49,9 @@ func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
// Check for existing item
if ent, ok := c.items[key]; ok {
c.evictList.moveToFront(ent)
if c.onEvict != nil {
c.onEvict(key, ent.value)
}
ent.value = value
return false
}

View File

@ -147,6 +147,13 @@ type Reader struct {
ignoreCRC bool
}
// GetBufferCapacity reports the capacity of the reader's internal buffer.
// This might be useful to know when reusing the same reader in combination
// with the lazy buffer option.
func (r *Reader) GetBufferCapacity() int {
	c := cap(r.buf)
	return c
}
// ensureBufferSize will ensure that the buffer can take at least n bytes.
// If false is returned the buffer exceeds maximum allowed size.
func (r *Reader) ensureBufferSize(n int) bool {

View File

@ -126,6 +126,10 @@ func (c *Client) provisionDirectory(ctx context.Context) error {
if err != nil {
return err
}
if c.dir.NewOrder == "" {
// catch faulty ACME servers that may not return proper HTTP status on errors
return fmt.Errorf("server did not return error headers, but required directory fields are missing: %+v", c.dir)
}
directories[c.Directory] = cachedDirectory{c.dir, time.Now()}
return nil
}

View File

@ -50,10 +50,6 @@ import (
"golang.org/x/net/idna"
)
func init() {
weakrand.Seed(time.Now().UnixNano())
}
// Client is a high-level API for ACME operations. It wraps
// a lower-level ACME client with useful functions to make
// common flows easier, especially for the issuance of
@ -65,33 +61,39 @@ type Client struct {
ChallengeSolvers map[string]Solver
}
// ObtainCertificateUsingCSR obtains all resulting certificate chains using the given CSR, which
// must be completely and properly filled out (particularly its DNSNames and Raw fields - this
// usually involves creating a template CSR, then calling x509.CreateCertificateRequest, then
// x509.ParseCertificateRequest on the output). The Subject CommonName is NOT considered.
// CSRSource is an interface that provides users of this
// package the ability to provide a CSR as part of the
// ACME flow. This allows the final CSR to be provided
// just before the Order is finalized.
type CSRSource interface {
CSR(context.Context) (*x509.CertificateRequest, error)
}
// ObtainCertificateUsingCSRSource obtains all resulting certificate chains using the given
// ACME Identifiers and the CSRSource. The CSRSource can be used to create and sign a final
// CSR to be submitted to the ACME server just before finalization. The CSR must be completely
// and properly filled out, because the provided ACME Identifiers will be validated against
// the Identifiers that can be extracted from the CSR. This package currently supports the
// DNS, IP address, Permanent Identifier and Hardware Module Name identifiers. The Subject
// CommonName is NOT considered.
//
// It implements every single part of the ACME flow described in RFC 8555 §7.1 with the exception
// of "Create account" because this method signature does not have a way to return the updated
// account object. The account's status MUST be "valid" in order to succeed.
// The CSR's Raw field containing the DER encoded signed certificate request must also be
// set. This usually involves creating a template CSR, then calling x509.CreateCertificateRequest,
// then x509.ParseCertificateRequest on the output.
//
// As far as SANs go, this method currently only supports DNSNames and IPAddresses on the csr.
func (c *Client) ObtainCertificateUsingCSR(ctx context.Context, account acme.Account, csr *x509.CertificateRequest) ([]acme.Certificate, error) {
// The method implements every single part of the ACME flow described in RFC 8555 §7.1 with the
// exception of "Create account" because this method signature does not have a way to return
// the updated account object. The account's status MUST be "valid" in order to succeed.
func (c *Client) ObtainCertificateUsingCSRSource(ctx context.Context, account acme.Account, identifiers []acme.Identifier, source CSRSource) ([]acme.Certificate, error) {
if account.Status != acme.StatusValid {
return nil, fmt.Errorf("account status is not valid: %s", account.Status)
}
if csr == nil {
return nil, fmt.Errorf("missing CSR")
if source == nil {
return nil, errors.New("missing CSR source")
}
ids, err := createIdentifiersUsingCSR(csr)
if err != nil {
return nil, err
}
if len(ids) == 0 {
return nil, fmt.Errorf("no identifiers found")
}
order := acme.Order{Identifiers: ids}
var err error
order := acme.Order{Identifiers: identifiers}
// remember which challenge types failed for which identifiers
// so we can retry with other challenge types
@ -154,6 +156,20 @@ func (c *Client) ObtainCertificateUsingCSR(ctx context.Context, account acme.Acc
c.Logger.Info("validations succeeded; finalizing order", zap.String("order", order.Location))
}
// get the CSR from its source
csr, err := source.CSR(ctx)
if err != nil {
return nil, fmt.Errorf("getting CSR from source: %w", err)
}
if csr == nil {
return nil, errors.New("source did not provide CSR")
}
// validate the order identifiers
if err := validateOrderIdentifiers(&order, csr); err != nil {
return nil, fmt.Errorf("validating order identifiers: %w", err)
}
// finalize the order, which requests the CA to issue us a certificate
order, err = c.Client.FinalizeOrder(ctx, account, order, csr.Raw)
if err != nil {
@ -180,6 +196,80 @@ func (c *Client) ObtainCertificateUsingCSR(ctx context.Context, account acme.Acc
return certChains, nil
}
// validateOrderIdentifiers checks that the ACME identifiers attached to the
// Order agree with the identifiers derived from the CSR. A mismatch would make
// the ACME server refuse issuance anyway, but catching it client-side is
// faster. There is currently no way to skip this validation.
func validateOrderIdentifiers(order *acme.Order, csr *x509.CertificateRequest) error {
	csrIdentifiers, err := createIdentifiersUsingCSR(csr)
	if err != nil {
		return fmt.Errorf("extracting identifiers from CSR: %w", err)
	}

	// The two sets must be the same size before a pairwise comparison makes sense.
	if len(csrIdentifiers) != len(order.Identifiers) {
		return fmt.Errorf("number of identifiers in Order %v (%d) does not match the number of identifiers extracted from CSR %v (%d)", order.Identifiers, len(order.Identifiers), csrIdentifiers, len(csrIdentifiers))
	}

	// Count every (order, CSR) identifier pair that agrees on both type and value.
	matched := 0
	for _, id := range order.Identifiers {
		for _, csrID := range csrIdentifiers {
			if csrID.Value == id.Value && csrID.Type == id.Type {
				matched++
			}
		}
	}
	if matched != len(csrIdentifiers) {
		return fmt.Errorf("identifiers in Order %v do not match the identifiers extracted from CSR %v", order.Identifiers, csrIdentifiers)
	}
	return nil
}
// csrSource is the internal CSRSource implementation that wraps a fixed,
// pre-built CSR; it lets the legacy ObtainCertificateUsingCSR method delegate
// to ObtainCertificateUsingCSRSource.
type csrSource struct {
	csr *x509.CertificateRequest
}

// CSR returns the wrapped certificate request; it never fails.
func (s *csrSource) CSR(_ context.Context) (*x509.CertificateRequest, error) {
	return s.csr, nil
}

// Interface guard.
var _ CSRSource = (*csrSource)(nil)
// ObtainCertificateUsingCSR obtains all resulting certificate chains using the given CSR, which
// must be completely and properly filled out (particularly its DNSNames and Raw fields - this
// usually involves creating a template CSR, then calling x509.CreateCertificateRequest, then
// x509.ParseCertificateRequest on the output). The Subject CommonName is NOT considered.
//
// It implements every single part of the ACME flow described in RFC 8555 §7.1 with the exception
// of "Create account" because this method signature does not have a way to return the updated
// account object. The account's status MUST be "valid" in order to succeed.
//
// As far as SANs go, this method currently only supports DNSNames, IPAddresses, Permanent
// Identifiers and Hardware Module Names on the CSR.
func (c *Client) ObtainCertificateUsingCSR(ctx context.Context, account acme.Account, csr *x509.CertificateRequest) ([]acme.Certificate, error) {
	if csr == nil {
		return nil, errors.New("missing CSR")
	}

	// Derive the ACME identifiers from the CSR up front; they are checked
	// against the CSR again right before finalization.
	ids, err := createIdentifiersUsingCSR(csr)
	switch {
	case err != nil:
		return nil, err
	case len(ids) == 0:
		return nil, errors.New("no identifiers found")
	}

	// Wrap the static CSR in a CSRSource and delegate to the source-based flow.
	return c.ObtainCertificateUsingCSRSource(ctx, account, ids, &csrSource{csr: csr})
}
// ObtainCertificate is the same as ObtainCertificateUsingCSR, except it is a slight wrapper
// that generates the CSR for you. Doing so requires the private key you will be using for
// the certificate (different from the account private key). It obtains a certificate for
@ -252,10 +342,12 @@ func (c *Client) getAuthzObjects(ctx context.Context, account acme.Account, orde
preferredChallenges.addUnique(chal.Type)
}
if preferredWasEmpty {
weakrand.Shuffle(len(preferredChallenges), func(i, j int) {
randomSourceMu.Lock()
randomSource.Shuffle(len(preferredChallenges), func(i, j int) {
preferredChallenges[i], preferredChallenges[j] =
preferredChallenges[j], preferredChallenges[i]
})
randomSourceMu.Unlock()
}
preferredChallengesMu.Unlock()
@ -702,9 +794,15 @@ type retryableErr struct{ error }
func (re retryableErr) Unwrap() error { return re.error }
// Keep a list of challenges we've seen offered by servers,
// and prefer keep an ordered list of
// Keep a list of challenges we've seen offered by servers, ordered by success rate.
var (
preferredChallenges challengeTypes
preferredChallengesMu sync.Mutex
)
// Best practice is to avoid the default RNG source and seed our own;
// custom sources are not safe for concurrent use, hence the mutex.
var (
randomSource = weakrand.New(weakrand.NewSource(time.Now().UnixNano()))
randomSourceMu sync.Mutex
)

View File

@ -1,3 +1,17 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acmez
import (

View File

@ -183,14 +183,13 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
// This allows users of the library to implement their own connection management,
// as opposed to Exchange, which will always use new connections and incur the added overhead
// that entails when using "tcp" and especially "tcp-tls" clients.
//
// When the singleflight is set for this client the context is _not_ forwarded to the (shared) exchange, to
// prevent one cancellation from canceling all outstanding requests.
// ExchangeWithConn performs a synchronous query over the caller-supplied
// connection by delegating to ExchangeWithConnContext with a background
// context. The duplicate, unreachable return statement (a merge artifact
// referencing the removed unexported exchangeWithConnContext) is dropped.
func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
	return c.ExchangeWithConnContext(context.Background(), m, conn)
}
func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
// ExchangeWithConnContext has the same behaviour as ExchangeWithConn and
// additionally obeys deadlines from the passed Context.
func (c *Client) ExchangeWithConnContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= MinMsgSize {
@ -460,5 +459,5 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg,
}
defer conn.Close()
return c.exchangeWithConnContext(ctx, m, conn)
return c.ExchangeWithConnContext(ctx, m, conn)
}

View File

@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
var Version = v{1, 1, 54}
var Version = v{1, 1, 55}
// v holds the version of this library.
type v struct {

View File

@ -289,3 +289,67 @@ func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketNam
}
return rinfo, nil
}
// GetBucketReplicationMetricsV2 fetches bucket replication status metrics
// using version 2 of the replication-metrics API.
func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) {
	// Validate the bucket name before talking to the server.
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return s, err
	}

	// Query parameter selecting version 2 of the replication metrics endpoint.
	qv := make(url.Values)
	qv.Set("replication-metrics", "2")

	// Execute GET on the bucket to retrieve the metrics.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:  bucketName,
		queryValues: qv,
	})
	defer closeResponse(resp)
	if err != nil {
		return s, err
	}
	if resp.StatusCode != http.StatusOK {
		return s, httpRespToErrorResponse(resp, bucketName, "")
	}

	// Decode the JSON payload into the metrics struct.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return s, err
	}
	err = json.Unmarshal(body, &s)
	return s, err
}
// CheckBucketReplication validates if replication is set up properly for a
// bucket; any problem is reported through the returned error.
func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) {
	// Validate the bucket name before talking to the server.
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}

	// Query parameter triggering the server-side replication configuration check.
	qv := make(url.Values)
	qv.Set("replication-check", "")

	// Execute GET on the bucket to run the check.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:  bucketName,
		queryValues: qv,
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return httpRespToErrorResponse(resp, bucketName, "")
	}
	return nil
}

View File

@ -222,6 +222,9 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
if dstOpts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "true")
}
if dstOpts.Internal.ReplicationValidityCheck {
headers.Set(minIOBucketReplicationCheck, "true")
}
if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}

View File

@ -23,6 +23,7 @@ import (
"fmt"
"io"
"net/http"
"strings"
)
/* **** SAMPLE ERROR RESPONSE ****
@ -188,6 +189,15 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
}
code := resp.Header.Get("x-minio-error-code")
if code != "" {
errResp.Code = code
}
desc := resp.Header.Get("x-minio-error-desc")
if desc != "" {
errResp.Message = strings.Trim(desc, `"`)
}
// Save hostID, requestID and region information
// from headers if not available through error XML.
if errResp.RequestID == "" {

View File

@ -56,14 +56,15 @@ func (r ReplicationStatus) Empty() bool {
// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition
// implementation on MinIO server
type AdvancedPutOptions struct {
SourceVersionID string
SourceETag string
ReplicationStatus ReplicationStatus
SourceMTime time.Time
ReplicationRequest bool
RetentionTimestamp time.Time
TaggingTimestamp time.Time
LegalholdTimestamp time.Time
SourceVersionID string
SourceETag string
ReplicationStatus ReplicationStatus
SourceMTime time.Time
ReplicationRequest bool
RetentionTimestamp time.Time
TaggingTimestamp time.Time
LegalholdTimestamp time.Time
ReplicationValidityCheck bool
}
// PutObjectOptions represents options specified by user for PutObject call
@ -188,6 +189,9 @@ func (opts PutObjectOptions) Header() (header http.Header) {
if opts.Internal.ReplicationRequest {
header.Set(minIOBucketReplicationRequest, "true")
}
if opts.Internal.ReplicationValidityCheck {
header.Set(minIOBucketReplicationCheck, "true")
}
if !opts.Internal.LegalholdTimestamp.IsZero() {
header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}

View File

@ -112,10 +112,11 @@ func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
// AdvancedRemoveOptions intended for internal use by replication
type AdvancedRemoveOptions struct {
ReplicationDeleteMarker bool
ReplicationStatus ReplicationStatus
ReplicationMTime time.Time
ReplicationRequest bool
ReplicationDeleteMarker bool
ReplicationStatus ReplicationStatus
ReplicationMTime time.Time
ReplicationRequest bool
ReplicationValidityCheck bool // check permissions
}
// RemoveObjectOptions represents options specified by user for RemoveObject call
@ -168,6 +169,9 @@ func (c *Client) removeObject(ctx context.Context, bucketName, objectName string
if opts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "true")
}
if opts.Internal.ReplicationValidityCheck {
headers.Set(minIOBucketReplicationCheck, "true")
}
if opts.ForceDelete {
headers.Set(minIOForceDelete, "true")
}

View File

@ -124,7 +124,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.57"
libraryVersion = "v7.0.59"
)
// User Agent should always following the below style.
@ -363,7 +363,8 @@ const (
online = 1
)
// IsOnline returns true if healthcheck enabled and client is online
// IsOnline returns true if healthcheck enabled and client is online.
// If HealthCheck function has not been called this will always return true.
func (c *Client) IsOnline() bool {
return !c.IsOffline()
}
@ -374,22 +375,37 @@ func (c *Client) markOffline() {
}
// IsOffline returns true if healthcheck enabled and client is offline
// If HealthCheck function has not been called this will always return false.
func (c *Client) IsOffline() bool {
return atomic.LoadInt32(&c.healthStatus) == offline
}
// HealthCheck starts a healthcheck to see if endpoint is up. Returns a context cancellation function
// and and error if health check is already started
// HealthCheck starts a healthcheck to see if endpoint is up.
// Returns a context cancellation function, to stop the health check,
// and an error if health check is already started.
func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
if atomic.LoadInt32(&c.healthStatus) == online {
if atomic.LoadInt32(&c.healthStatus) != unknown {
return nil, fmt.Errorf("health check is running")
}
if hcDuration < 1*time.Second {
return nil, fmt.Errorf("health check duration should be atleast 1 second")
return nil, fmt.Errorf("health check duration should be at least 1 second")
}
ctx, cancelFn := context.WithCancel(context.Background())
atomic.StoreInt32(&c.healthStatus, online)
probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
ctx, cancelFn := context.WithCancel(context.Background())
atomic.StoreInt32(&c.healthStatus, offline)
{
// Change to online, if we can connect.
gctx, gcancel := context.WithTimeout(ctx, 3*time.Second)
_, err := c.getBucketLocation(gctx, probeBucketName)
gcancel()
if !IsNetworkOrHostDown(err, false) {
switch ToErrorResponse(err).Code {
case "NoSuchBucket", "AccessDenied", "":
atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
}
}
}
go func(duration time.Duration) {
timer := time.NewTimer(duration)
defer timer.Stop()

View File

@ -94,6 +94,8 @@ const (
minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request"
minIOBucketReplicationCheck = "X-Minio-Source-Replication-Check"
// Header indicates last tag update time on source
minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp"
// Header indicates last retention update time on source

View File

@ -33,20 +33,31 @@ type EventType string
//
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
const (
ObjectCreatedAll EventType = "s3:ObjectCreated:*"
ObjectCreatedPut EventType = "s3:ObjectCreated:Put"
ObjectCreatedPost EventType = "s3:ObjectCreated:Post"
ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy"
ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
ObjectAccessedGet EventType = "s3:ObjectAccessed:Get"
ObjectAccessedHead EventType = "s3:ObjectAccessed:Head"
ObjectAccessedAll EventType = "s3:ObjectAccessed:*"
ObjectRemovedAll EventType = "s3:ObjectRemoved:*"
ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete"
ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject"
BucketCreatedAll EventType = "s3:BucketCreated:*"
BucketRemovedAll EventType = "s3:BucketRemoved:*"
ObjectCreatedAll EventType = "s3:ObjectCreated:*"
ObjectCreatedPut EventType = "s3:ObjectCreated:Put"
ObjectCreatedPost EventType = "s3:ObjectCreated:Post"
ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy"
ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
ObjectAccessedGet EventType = "s3:ObjectAccessed:Get"
ObjectAccessedHead EventType = "s3:ObjectAccessed:Head"
ObjectAccessedAll EventType = "s3:ObjectAccessed:*"
ObjectRemovedAll EventType = "s3:ObjectRemoved:*"
ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete"
ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject"
ObjectTransitionAll EventType = "s3:ObjectTransition:*"
ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed"
ObjectTransitionComplete EventType = "s3:ObjectTransition:Complete"
ObjectTransitionPost EventType = "s3:ObjectRestore:Post"
ObjectTransitionCompleted EventType = "s3:ObjectRestore:Completed"
ObjectReplicationAll EventType = "s3:Replication:*"
ObjectReplicationOperationCompletedReplication EventType = "s3:Replication:OperationCompletedReplication"
ObjectReplicationOperationFailedReplication EventType = "s3:Replication:OperationFailedReplication"
ObjectReplicationOperationMissedThreshold EventType = "s3:Replication:OperationMissedThreshold"
ObjectReplicationOperationNotTracked EventType = "s3:Replication:OperationNotTracked"
ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold"
BucketCreatedAll EventType = "s3:BucketCreated:*"
BucketRemovedAll EventType = "s3:BucketRemoved:*"
)
// FilterRule - child of S3Key, a tag in the notification xml which

View File

@ -20,6 +20,7 @@ import (
"bytes"
"encoding/xml"
"fmt"
"math"
"strconv"
"strings"
"time"
@ -704,6 +705,8 @@ type TargetMetrics struct {
BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"`
// Current bandwidth used in bytes/sec for this target
CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
// Completed count
ReplicatedCount uint64 `json:"replicationCount"`
}
// Metrics represents inline replication metrics for a bucket.
@ -721,6 +724,10 @@ type Metrics struct {
PendingCount uint64 `json:"pendingReplicationCount"`
// Total number of failed operations including metadata updates across targets
FailedCount uint64 `json:"failedReplicationCount"`
// Total Replica counts
ReplicaCount int64 `json:"replicaCount,omitempty"`
// Total Replicated count
ReplicatedCount int64 `json:"replicationCount,omitempty"`
}
// ResyncTargetsInfo provides replication target information to resync replicated data.
@ -742,9 +749,114 @@ type ResyncTarget struct {
FailedSize int64 `json:"failedReplicationSize,omitempty"`
// Total number of failed operations
FailedCount int64 `json:"failedReplicationCount,omitempty"`
// Total number of failed operations
// Total number of completed operations
ReplicatedCount int64 `json:"replicationCount,omitempty"`
// Last bucket/object replicated.
Bucket string `json:"bucket,omitempty"`
Object string `json:"object,omitempty"`
}
// XferStats holds transfer rate info for uploads/sec
type XferStats struct {
	AvgRate  float64 `json:"avgRate"`  // average transfer rate
	PeakRate float64 `json:"peakRate"` // peak transfer rate observed
	CurrRate float64 `json:"currRate"` // most recent transfer rate
}

// InQueueStats holds stats for objects in replication queue
type InQueueStats struct {
	Count int32 `json:"count"` // number of objects currently queued
	Bytes int64 `json:"bytes"` // total size of queued objects
}

// MetricName name of replication metric
type MetricName string

const (
	// Large is a metric name for large objects >=128MiB
	Large MetricName = "Large"
	// Small is a metric name for objects <128MiB size
	Small MetricName = "Small"
	// Total is a metric name for total objects
	Total MetricName = "Total"
)

// ReplQNodeStats holds stats for a node in replication queue
type ReplQNodeStats struct {
	NodeName      string                      `json:"nodeName"`      // identity of the node reporting these stats
	Uptime        int64                       `json:"uptime"`        // node uptime (units defined by the server)
	ActiveWorkers int32                       `json:"activeWorkers"` // replication workers currently active on this node
	XferStats     map[MetricName]XferStats    `json:"xferStats"`     // transfer rates keyed by metric name (Large/Small/Total)
	QStats        map[MetricName]InQueueStats `json:"qStats"`        // queued-object stats keyed by metric name
}

// ReplQueueStats holds stats for replication queue across nodes
type ReplQueueStats struct {
	Nodes []ReplQNodeStats `json:"nodes"` // per-node queue statistics
}
// Workers returns number of workers across all nodes
func (q ReplQueueStats) Workers() int64 {
	var total int64
	for _, n := range q.Nodes {
		total += int64(n.ActiveWorkers)
	}
	return total
}
// ReplQStats holds stats for objects in replication queue
type ReplQStats struct {
Uptime int64 `json:"uptime"`
Workers int64 `json:"workers"`
XferStats map[MetricName]XferStats `json:"xferStats"`
QStats map[MetricName]InQueueStats `json:"qStats"`
}
// QStats returns cluster level stats for objects in replication queue:
// workers, uptime, transfer and queue stats are summed across all nodes,
// then the rate averages and uptime are normalized by the node count.
func (q ReplQueueStats) QStats() (r ReplQStats) {
	r.QStats = make(map[MetricName]InQueueStats)
	r.XferStats = make(map[MetricName]XferStats)

	for _, node := range q.Nodes {
		r.Workers += int64(node.ActiveWorkers)
		for name, x := range node.XferStats {
			// A missing key yields the zero value, which is the correct seed.
			agg := r.XferStats[name]
			agg.AvgRate += x.AvgRate
			agg.CurrRate += x.CurrRate
			agg.PeakRate = math.Max(agg.PeakRate, x.PeakRate)
			r.XferStats[name] = agg
		}
		for name, in := range node.QStats {
			agg := r.QStats[name]
			agg.Count += in.Count
			agg.Bytes += in.Bytes
			r.QStats[name] = agg
		}
		r.Uptime += node.Uptime
	}

	// Normalize the summed rates and uptime to per-node averages.
	if n := len(q.Nodes); n > 0 {
		for name, agg := range r.XferStats {
			agg.AvgRate /= float64(n)
			agg.CurrRate /= float64(n)
			r.XferStats[name] = agg
		}
		r.Uptime /= int64(n) // average uptime
	}
	return
}
// MetricsV2 represents replication metrics for a bucket.
type MetricsV2 struct {
	History      Metrics        `json:"history"`    // historical replication metrics
	CurrentStats Metrics        `json:"currStats"`  // current replication metrics
	QueueStats   ReplQueueStats `json:"queueStats"` // per-node replication queue stats
}

23
vendor/github.com/shirou/gopsutil/v3/common/env.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Package common provides shared helpers for gopsutil; this file declares the
// context keys used to point gopsutil at alternative host filesystem roots.
package common

// EnvKeyType is the type used for context keys carrying environment overrides.
type EnvKeyType string

// EnvKey is a context key that can be used to set programmatically the environment
// gopsutil relies on to perform calls against the OS.
// Example of use:
//
//	ctx := context.WithValue(context.Background(), common.EnvKey, EnvMap{common.HostProcEnvKey: "/myproc"})
//	avg, err := load.AvgWithContext(ctx)
var EnvKey = EnvKeyType("env")

const (
	HostProcEnvKey EnvKeyType = "HOST_PROC" // override for the /proc mount point
	HostSysEnvKey  EnvKeyType = "HOST_SYS"  // override for the /sys mount point
	HostEtcEnvKey  EnvKeyType = "HOST_ETC"  // override for the /etc directory
	HostVarEnvKey  EnvKeyType = "HOST_VAR"  // override for the /var directory
	HostRunEnvKey  EnvKeyType = "HOST_RUN"  // override for the /run directory
	HostDevEnvKey  EnvKeyType = "HOST_DEV"  // override for the /dev directory
	HostRootEnvKey EnvKeyType = "HOST_ROOT" // override for the filesystem root
)

// EnvMap maps environment override keys to their replacement path values.
type EnvMap map[EnvKeyType]string

View File

@ -96,7 +96,7 @@ func Times(percpu bool) ([]TimesStat, error) {
}
func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
filename := common.HostProc("stat")
filename := common.HostProcWithContext(ctx, "stat")
lines := []string{}
if percpu {
statlines, err := common.ReadLines(filename)
@ -126,17 +126,17 @@ func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
return ret, nil
}
func sysCPUPath(cpu int32, relPath string) string {
return common.HostSys(fmt.Sprintf("devices/system/cpu/cpu%d", cpu), relPath)
func sysCPUPath(ctx context.Context, cpu int32, relPath string) string {
return common.HostSysWithContext(ctx, fmt.Sprintf("devices/system/cpu/cpu%d", cpu), relPath)
}
func finishCPUInfo(c *InfoStat) {
func finishCPUInfo(ctx context.Context, c *InfoStat) {
var lines []string
var err error
var value float64
if len(c.CoreID) == 0 {
lines, err = common.ReadLines(sysCPUPath(c.CPU, "topology/core_id"))
lines, err = common.ReadLines(sysCPUPath(ctx, c.CPU, "topology/core_id"))
if err == nil {
c.CoreID = lines[0]
}
@ -145,7 +145,7 @@ func finishCPUInfo(c *InfoStat) {
// override the value of c.Mhz with cpufreq/cpuinfo_max_freq regardless
// of the value from /proc/cpuinfo because we want to report the maximum
// clock-speed of the CPU for c.Mhz, matching the behaviour of Windows
lines, err = common.ReadLines(sysCPUPath(c.CPU, "cpufreq/cpuinfo_max_freq"))
lines, err = common.ReadLines(sysCPUPath(ctx, c.CPU, "cpufreq/cpuinfo_max_freq"))
// if we encounter errors below such as there are no cpuinfo_max_freq file,
// we just ignore. so let Mhz is 0.
if err != nil || len(lines) == 0 {
@ -173,7 +173,7 @@ func Info() ([]InfoStat, error) {
}
func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
filename := common.HostProc("cpuinfo")
filename := common.HostProcWithContext(ctx, "cpuinfo")
lines, _ := common.ReadLines(filename)
var ret []InfoStat
@ -193,7 +193,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
processorName = value
case "processor", "cpu number":
if c.CPU >= 0 {
finishCPUInfo(&c)
finishCPUInfo(ctx, &c)
ret = append(ret, c)
}
c = InfoStat{Cores: 1, ModelName: processorName}
@ -301,7 +301,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
}
}
if c.CPU >= 0 {
finishCPUInfo(&c)
finishCPUInfo(ctx, &c)
ret = append(ret, c)
}
return ret, nil
@ -390,7 +390,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) {
if logical {
ret := 0
// https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_pslinux.py#L599
procCpuinfo := common.HostProc("cpuinfo")
procCpuinfo := common.HostProcWithContext(ctx, "cpuinfo")
lines, err := common.ReadLines(procCpuinfo)
if err == nil {
for _, line := range lines {
@ -404,7 +404,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) {
}
}
if ret == 0 {
procStat := common.HostProc("stat")
procStat := common.HostProcWithContext(ctx, "stat")
lines, err = common.ReadLines(procStat)
if err != nil {
return 0, err
@ -425,7 +425,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) {
// https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964
// https://lkml.org/lkml/2019/2/26/41
for _, glob := range []string{"devices/system/cpu/cpu[0-9]*/topology/core_cpus_list", "devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list"} {
if files, err := filepath.Glob(common.HostSys(glob)); err == nil {
if files, err := filepath.Glob(common.HostSysWithContext(ctx, glob)); err == nil {
for _, file := range files {
lines, err := common.ReadLines(file)
if err != nil || len(lines) != 1 {
@ -440,7 +440,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) {
}
}
// https://github.com/giampaolo/psutil/blob/122174a10b75c9beebe15f6c07dcf3afbe3b120d/psutil/_pslinux.py#L631-L652
filename := common.HostProc("cpuinfo")
filename := common.HostProcWithContext(ctx, "cpuinfo")
lines, err := common.ReadLines(filename)
if err != nil {
return 0, err

View File

@ -6,8 +6,9 @@ package disk
import (
"context"
"github.com/shirou/gopsutil/v3/internal/common"
"golang.org/x/sys/unix"
"github.com/shirou/gopsutil/v3/internal/common"
)
// PartitionsWithContext returns disk partition.

View File

@ -260,10 +260,10 @@ func readMountFile(root string) (lines []string, useMounts bool, filename string
func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) {
// by default, try "/proc/1/..." first
root := common.HostProc(path.Join("1"))
root := common.HostProcWithContext(ctx, path.Join("1"))
// force preference for dirname of HOST_PROC_MOUNTINFO, if set #1271
hpmPath := os.Getenv("HOST_PROC_MOUNTINFO")
hpmPath := common.HostProcMountInfoWithContext(ctx)
if hpmPath != "" {
root = filepath.Dir(hpmPath)
}
@ -274,13 +274,13 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro
return nil, err
}
// fallback to "/proc/self/..." #1159
lines, useMounts, filename, err = readMountFile(common.HostProc(path.Join("self")))
lines, useMounts, filename, err = readMountFile(common.HostProcWithContext(ctx, path.Join("self")))
if err != nil {
return nil, err
}
}
fs, err := getFileSystems()
fs, err := getFileSystems(ctx)
if err != nil && !all {
return nil, err
}
@ -342,7 +342,7 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro
}
if strings.HasPrefix(d.Device, "/dev/mapper/") {
devpath, err := filepath.EvalSymlinks(common.HostDev(strings.Replace(d.Device, "/dev", "", 1)))
devpath, err := filepath.EvalSymlinks(common.HostDevWithContext(ctx, strings.Replace(d.Device, "/dev", "", 1)))
if err == nil {
d.Device = devpath
}
@ -351,7 +351,7 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro
// /dev/root is not the real device name
// so we get the real device name from its major/minor number
if d.Device == "/dev/root" {
devpath, err := os.Readlink(common.HostSys("/dev/block/" + blockDeviceID))
devpath, err := os.Readlink(common.HostSysWithContext(ctx, "/dev/block/"+blockDeviceID))
if err == nil {
d.Device = strings.Replace(d.Device, "root", filepath.Base(devpath), 1)
}
@ -364,8 +364,8 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro
}
// getFileSystems returns supported filesystems from /proc/filesystems
func getFileSystems() ([]string, error) {
filename := common.HostProc("filesystems")
func getFileSystems(ctx context.Context) ([]string, error) {
filename := common.HostProcWithContext(ctx, "filesystems")
lines, err := common.ReadLines(filename)
if err != nil {
return nil, err
@ -387,7 +387,7 @@ func getFileSystems() ([]string, error) {
}
func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) {
filename := common.HostProc("diskstats")
filename := common.HostProcWithContext(ctx, "diskstats")
lines, err := common.ReadLines(filename)
if err != nil {
return nil, err
@ -492,7 +492,7 @@ func SerialNumberWithContext(ctx context.Context, name string) (string, error) {
minor := unix.Minor(uint64(stat.Rdev))
// Try to get the serial from udev data
udevDataPath := common.HostRun(fmt.Sprintf("udev/data/b%d:%d", major, minor))
udevDataPath := common.HostRunWithContext(ctx, fmt.Sprintf("udev/data/b%d:%d", major, minor))
if udevdata, err := ioutil.ReadFile(udevDataPath); err == nil {
scanner := bufio.NewScanner(bytes.NewReader(udevdata))
for scanner.Scan() {
@ -505,7 +505,7 @@ func SerialNumberWithContext(ctx context.Context, name string) (string, error) {
// Try to get the serial from sysfs, look at the disk device (minor 0) directly
// because if it is a partition it is not going to contain any device information
devicePath := common.HostSys(fmt.Sprintf("dev/block/%d:0/device", major))
devicePath := common.HostSysWithContext(ctx, fmt.Sprintf("dev/block/%d:0/device", major))
model, _ := ioutil.ReadFile(filepath.Join(devicePath, "model"))
serial, _ := ioutil.ReadFile(filepath.Join(devicePath, "serial"))
if len(model) > 0 && len(serial) > 0 {
@ -516,7 +516,7 @@ func SerialNumberWithContext(ctx context.Context, name string) (string, error) {
func LabelWithContext(ctx context.Context, name string) (string, error) {
// Try label based on devicemapper name
dmname_filename := common.HostSys(fmt.Sprintf("block/%s/dm/name", name))
dmname_filename := common.HostSysWithContext(ctx, fmt.Sprintf("block/%s/dm/name", name))
if !common.PathExists(dmname_filename) {
return "", nil

View File

@ -25,6 +25,8 @@ import (
"strconv"
"strings"
"time"
"github.com/shirou/gopsutil/v3/common"
)
var (
@ -321,6 +323,23 @@ func PathExistsWithContents(filename string) bool {
return info.Size() > 4 // at least 4 bytes
}
// GetEnvWithContext retrieves the environment variable key. If it does not exist it returns the default.
// The context may optionally contain a map superseding os.EnvKey.
func GetEnvWithContext(ctx context.Context, key string, dfault string, combineWith ...string) string {
var value string
if env, ok := ctx.Value(common.EnvKey).(common.EnvMap); ok {
value = env[common.EnvKeyType(key)]
}
if value == "" {
value = os.Getenv(key)
}
if value == "" {
value = dfault
}
return combine(value, combineWith)
}
// GetEnv retrieves the environment variable key. If it does not exist it returns the default.
func GetEnv(key string, dfault string, combineWith ...string) string {
value := os.Getenv(key)
@ -328,6 +347,10 @@ func GetEnv(key string, dfault string, combineWith ...string) string {
value = dfault
}
return combine(value, combineWith)
}
func combine(value string, combineWith []string) string {
switch len(combineWith) {
case 0:
return value
@ -369,6 +392,38 @@ func HostRoot(combineWith ...string) string {
return GetEnv("HOST_ROOT", "/", combineWith...)
}
func HostProcWithContext(ctx context.Context, combineWith ...string) string {
return GetEnvWithContext(ctx, "HOST_PROC", "/proc", combineWith...)
}
func HostProcMountInfoWithContext(ctx context.Context, combineWith ...string) string {
return GetEnvWithContext(ctx, "HOST_PROC_MOUNTINFO", "", combineWith...)
}
func HostSysWithContext(ctx context.Context, combineWith ...string) string {
return GetEnvWithContext(ctx, "HOST_SYS", "/sys", combineWith...)
}
func HostEtcWithContext(ctx context.Context, combineWith ...string) string {
return GetEnvWithContext(ctx, "HOST_ETC", "/etc", combineWith...)
}
func HostVarWithContext(ctx context.Context, combineWith ...string) string {
return GetEnvWithContext(ctx, "HOST_VAR", "/var", combineWith...)
}
func HostRunWithContext(ctx context.Context, combineWith ...string) string {
return GetEnvWithContext(ctx, "HOST_RUN", "/run", combineWith...)
}
func HostDevWithContext(ctx context.Context, combineWith ...string) string {
return GetEnvWithContext(ctx, "HOST_DEV", "/dev", combineWith...)
}
func HostRootWithContext(ctx context.Context, combineWith ...string) string {
return GetEnvWithContext(ctx, "HOST_ROOT", "/", combineWith...)
}
// getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running
// sysctl commands (see DoSysctrl).
func getSysctrlEnv(env []string) []string {

View File

@ -31,7 +31,11 @@ func DoSysctrl(mib string) ([]string, error) {
}
func NumProcs() (uint64, error) {
f, err := os.Open(HostProc())
return NumProcsWithContext(context.Background())
}
func NumProcsWithContext(ctx context.Context) (uint64, error) {
f, err := os.Open(HostProcWithContext(ctx))
if err != nil {
return 0, err
}
@ -67,7 +71,7 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) {
statFile = "uptime"
}
filename := HostProc(statFile)
filename := HostProcWithContext(ctx, statFile)
lines, err := ReadLines(filename)
if os.IsPermission(err) {
var info syscall.Sysinfo_t
@ -139,7 +143,7 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
}
cachedVirtMutex.RUnlock()
filename := HostProc("xen")
filename := HostProcWithContext(ctx, "xen")
if PathExists(filename) {
system = "xen"
role = "guest" // assume guest
@ -154,7 +158,7 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
}
}
filename = HostProc("modules")
filename = HostProcWithContext(ctx, "modules")
if PathExists(filename) {
contents, err := ReadLines(filename)
if err == nil {
@ -177,7 +181,7 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
}
}
filename = HostProc("cpuinfo")
filename = HostProcWithContext(ctx, "cpuinfo")
if PathExists(filename) {
contents, err := ReadLines(filename)
if err == nil {
@ -190,7 +194,7 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
}
}
filename = HostProc("bus/pci/devices")
filename = HostProcWithContext(ctx, "bus/pci/devices")
if PathExists(filename) {
contents, err := ReadLines(filename)
if err == nil {
@ -200,7 +204,7 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
}
}
filename = HostProc()
filename = HostProcWithContext(ctx)
if PathExists(filepath.Join(filename, "bc", "0")) {
system = "openvz"
role = "host"
@ -251,15 +255,15 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
}
}
if PathExists(HostEtc("os-release")) {
p, _, err := GetOSRelease()
if PathExists(HostEtcWithContext(ctx, "os-release")) {
p, _, err := GetOSReleaseWithContext(ctx)
if err == nil && p == "coreos" {
system = "rkt" // Is it true?
role = "host"
}
}
if PathExists(HostRoot(".dockerenv")) {
if PathExists(HostRootWithContext(ctx, ".dockerenv")) {
system = "docker"
role = "guest"
}
@ -278,7 +282,11 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
}
func GetOSRelease() (platform string, version string, err error) {
contents, err := ReadLines(HostEtc("os-release"))
return GetOSReleaseWithContext(context.Background())
}
func GetOSReleaseWithContext(ctx context.Context) (platform string, version string, err error) {
contents, err := ReadLines(HostEtcWithContext(ctx, "os-release"))
if err != nil {
return "", "", nil // return empty
}

View File

@ -8,8 +8,9 @@ import (
"fmt"
"unsafe"
"github.com/shirou/gopsutil/v3/internal/common"
"golang.org/x/sys/unix"
"github.com/shirou/gopsutil/v3/internal/common"
)
func getHwMemsize() (uint64, error) {

View File

@ -37,7 +37,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) {
}
func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
vm, _, err := fillFromMeminfoWithContext()
vm, _, err := fillFromMeminfoWithContext(ctx)
if err != nil {
return nil, err
}
@ -49,15 +49,15 @@ func VirtualMemoryEx() (*VirtualMemoryExStat, error) {
}
func VirtualMemoryExWithContext(ctx context.Context) (*VirtualMemoryExStat, error) {
_, vmEx, err := fillFromMeminfoWithContext()
_, vmEx, err := fillFromMeminfoWithContext(ctx)
if err != nil {
return nil, err
}
return vmEx, nil
}
func fillFromMeminfoWithContext() (*VirtualMemoryStat, *VirtualMemoryExStat, error) {
filename := common.HostProc("meminfo")
func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *VirtualMemoryExStat, error) {
filename := common.HostProcWithContext(ctx, "meminfo")
lines, _ := common.ReadLines(filename)
// flag if MemAvailable is in /proc/meminfo (kernel 3.14+)
@ -318,7 +318,7 @@ func fillFromMeminfoWithContext() (*VirtualMemoryStat, *VirtualMemoryExStat, err
if !memavail {
if activeFile && inactiveFile && sReclaimable {
ret.Available = calculateAvailVmem(ret, retEx)
ret.Available = calculateAvailVmem(ctx, ret, retEx)
} else {
ret.Available = ret.Cached + ret.Free
}
@ -351,7 +351,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
} else {
ret.UsedPercent = 0
}
filename := common.HostProc("vmstat")
filename := common.HostProcWithContext(ctx, "vmstat")
lines, _ := common.ReadLines(filename)
for _, l := range lines {
fields := strings.Fields(l)
@ -403,10 +403,10 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) {
// calculateAvailVmem is a fallback under kernel 3.14 where /proc/meminfo does not provide
// "MemAvailable:" column. It reimplements an algorithm from the link below
// https://github.com/giampaolo/psutil/pull/890
func calculateAvailVmem(ret *VirtualMemoryStat, retEx *VirtualMemoryExStat) uint64 {
func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *VirtualMemoryExStat) uint64 {
var watermarkLow uint64
fn := common.HostProc("zoneinfo")
fn := common.HostProcWithContext(ctx, "zoneinfo")
lines, err := common.ReadLines(fn)
if err != nil {
return ret.Free + ret.Cached // fallback under kernel 2.6.13
@ -458,18 +458,18 @@ func SwapDevices() ([]*SwapDevice, error) {
}
func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) {
swapsFilePath := common.HostProc(swapsFilename)
swapsFilePath := common.HostProcWithContext(ctx, swapsFilename)
f, err := os.Open(swapsFilePath)
if err != nil {
return nil, err
}
defer f.Close()
return parseSwapsFile(f)
return parseSwapsFile(ctx, f)
}
func parseSwapsFile(r io.Reader) ([]*SwapDevice, error) {
swapsFilePath := common.HostProc(swapsFilename)
func parseSwapsFile(ctx context.Context, r io.Reader) ([]*SwapDevice, error) {
swapsFilePath := common.HostProcWithContext(ctx, swapsFilename)
scanner := bufio.NewScanner(r)
if !scanner.Scan() {
if err := scanner.Err(); err != nil {

View File

@ -259,7 +259,7 @@ func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) {
}
func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) {
return IOCounters(pernic)
return IOCountersWithContext(ctx, pernic)
}
func FilterCounters() ([]FilterStat, error) {

View File

@ -50,7 +50,7 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) {
}
func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) {
filename := common.HostProc("net/dev")
filename := common.HostProcWithContext(ctx, "net/dev")
return IOCountersByFileWithContext(ctx, pernic, filename)
}
@ -177,7 +177,7 @@ func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoC
protos[p] = true
}
filename := common.HostProc("net/snmp")
filename := common.HostProcWithContext(ctx, "net/snmp")
lines, err := common.ReadLines(filename)
if err != nil {
return nil, err
@ -230,8 +230,8 @@ func FilterCounters() ([]FilterStat, error) {
}
func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) {
countfile := common.HostProc("sys/net/netfilter/nf_conntrack_count")
maxfile := common.HostProc("sys/net/netfilter/nf_conntrack_max")
countfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_count")
maxfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_max")
count, err := common.ReadInts(countfile)
if err != nil {
@ -260,7 +260,7 @@ func ConntrackStats(percpu bool) ([]ConntrackStat, error) {
// ConntrackStatsWithContext returns more detailed info about the conntrack table
func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) {
return conntrackStatsFromFile(common.HostProc("net/stat/nf_conntrack"), percpu)
return conntrackStatsFromFile(common.HostProcWithContext(ctx, "net/stat/nf_conntrack"), percpu)
}
// conntrackStatsFromFile returns more detailed info about the conntrack table
@ -459,7 +459,7 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p
if !ok {
return nil, fmt.Errorf("invalid kind, %s", kind)
}
root := common.HostProc()
root := common.HostProcWithContext(ctx)
var err error
var inodes map[string][]inodeMap
if pid == 0 {
@ -531,7 +531,7 @@ func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tma
if !skipUids {
// fetch process owner Real, effective, saved set, and filesystem UIDs
proc := process{Pid: conn.Pid}
conn.Uids, _ = proc.getUids()
conn.Uids, _ = proc.getUids(ctx)
}
ret = append(ret, conn)
@ -599,7 +599,7 @@ func Pids() ([]int32, error) {
func PidsWithContext(ctx context.Context) ([]int32, error) {
var ret []int32
d, err := os.Open(common.HostProc())
d, err := os.Open(common.HostProcWithContext(ctx))
if err != nil {
return nil, err
}
@ -631,8 +631,8 @@ type process struct {
}
// Uids returns user ids of the process as a slice of the int
func (p *process) getUids() ([]int32, error) {
err := p.fillFromStatus()
func (p *process) getUids(ctx context.Context) ([]int32, error) {
err := p.fillFromStatus(ctx)
if err != nil {
return []int32{}, err
}
@ -640,9 +640,9 @@ func (p *process) getUids() ([]int32, error) {
}
// Get status from /proc/(pid)/status
func (p *process) fillFromStatus() error {
func (p *process) fillFromStatus(ctx context.Context) error {
pid := p.Pid
statPath := common.HostProc(strconv.Itoa(int(pid)), "status")
statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "status")
contents, err := ioutil.ReadFile(statPath)
if err != nil {
return err

View File

@ -10,10 +10,11 @@ import (
"strconv"
"strings"
"github.com/shirou/gopsutil/v3/internal/common"
"github.com/shirou/gopsutil/v3/net"
"github.com/tklauser/go-sysconf"
"golang.org/x/sys/unix"
"github.com/shirou/gopsutil/v3/internal/common"
"github.com/shirou/gopsutil/v3/net"
)
// copied from sys/sysctl.h

View File

@ -101,7 +101,7 @@ func (p *Process) TgidWithContext(ctx context.Context) (int32, error) {
}
func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
return p.fillFromExeWithContext()
return p.fillFromExeWithContext(ctx)
}
func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
@ -121,7 +121,7 @@ func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) {
}
func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
return p.fillFromCwdWithContext()
return p.fillFromCwdWithContext(ctx)
}
func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
@ -135,7 +135,7 @@ func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) {
func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) {
// see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details
pid := p.Pid
statPath := common.HostProc(strconv.Itoa(int(pid)), "stat")
statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "stat")
contents, err := ioutil.ReadFile(statPath)
if err != nil {
return false, err
@ -203,7 +203,7 @@ func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) {
}
func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) {
rlimits, err := p.fillFromLimitsWithContext()
rlimits, err := p.fillFromLimitsWithContext(ctx)
if !gatherUsed || err != nil {
return rlimits, err
}
@ -258,7 +258,7 @@ func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) (
}
func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) {
return p.fillFromIOWithContext()
return p.fillFromIOWithContext(ctx)
}
func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) {
@ -284,7 +284,7 @@ func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) {
ret := make(map[int32]*cpu.TimesStat)
taskPath := common.HostProc(strconv.Itoa(int(p.Pid)), "task")
taskPath := common.HostProcWithContext(ctx, strconv.Itoa(int(p.Pid)), "task")
tids, err := readPidsFromDir(taskPath)
if err != nil {
@ -315,7 +315,7 @@ func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) {
}
func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
meminfo, _, err := p.fillFromStatmWithContext()
meminfo, _, err := p.fillFromStatmWithContext(ctx)
if err != nil {
return nil, err
}
@ -323,7 +323,7 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e
}
func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) {
_, memInfoEx, err := p.fillFromStatmWithContext()
_, memInfoEx, err := p.fillFromStatmWithContext(ctx)
if err != nil {
return nil, err
}
@ -381,12 +381,12 @@ func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net
func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) {
pid := p.Pid
var ret []MemoryMapsStat
smapsPath := common.HostProc(strconv.Itoa(int(pid)), "smaps")
smapsPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "smaps")
if grouped {
ret = make([]MemoryMapsStat, 1)
// If smaps_rollup exists (require kernel >= 4.15), then we will use it
// for pre-summed memory information for a process.
smapsRollupPath := common.HostProc(strconv.Itoa(int(pid)), "smaps_rollup")
smapsRollupPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "smaps_rollup")
if _, err := os.Stat(smapsRollupPath); !os.IsNotExist(err) {
smapsPath = smapsRollupPath
}
@ -482,7 +482,7 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M
}
func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) {
environPath := common.HostProc(strconv.Itoa(int(p.Pid)), "environ")
environPath := common.HostProcWithContext(ctx, strconv.Itoa(int(p.Pid)), "environ")
environContent, err := ioutil.ReadFile(environPath)
if err != nil {
@ -508,9 +508,9 @@ func limitToUint(val string) (uint64, error) {
}
// Get num_fds from /proc/(pid)/limits
func (p *Process) fillFromLimitsWithContext() ([]RlimitStat, error) {
func (p *Process) fillFromLimitsWithContext(ctx context.Context) ([]RlimitStat, error) {
pid := p.Pid
limitsFile := common.HostProc(strconv.Itoa(int(pid)), "limits")
limitsFile := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "limits")
d, err := os.Open(limitsFile)
if err != nil {
return nil, err
@ -603,7 +603,7 @@ func (p *Process) fillFromLimitsWithContext() ([]RlimitStat, error) {
// Get list of /proc/(pid)/fd files
func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []string, error) {
pid := p.Pid
statPath := common.HostProc(strconv.Itoa(int(pid)), "fd")
statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "fd")
d, err := os.Open(statPath)
if err != nil {
return statPath, []string{}, err
@ -643,9 +643,9 @@ func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []*OpenFile
}
// Get cwd from /proc/(pid)/cwd
func (p *Process) fillFromCwdWithContext() (string, error) {
func (p *Process) fillFromCwdWithContext(ctx context.Context) (string, error) {
pid := p.Pid
cwdPath := common.HostProc(strconv.Itoa(int(pid)), "cwd")
cwdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cwd")
cwd, err := os.Readlink(cwdPath)
if err != nil {
return "", err
@ -654,9 +654,9 @@ func (p *Process) fillFromCwdWithContext() (string, error) {
}
// Get exe from /proc/(pid)/exe
func (p *Process) fillFromExeWithContext() (string, error) {
func (p *Process) fillFromExeWithContext(ctx context.Context) (string, error) {
pid := p.Pid
exePath := common.HostProc(strconv.Itoa(int(pid)), "exe")
exePath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "exe")
exe, err := os.Readlink(exePath)
if err != nil {
return "", err
@ -667,7 +667,7 @@ func (p *Process) fillFromExeWithContext() (string, error) {
// Get cmdline from /proc/(pid)/cmdline
func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) {
pid := p.Pid
cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline")
cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline")
cmdline, err := ioutil.ReadFile(cmdPath)
if err != nil {
return "", err
@ -681,7 +681,7 @@ func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error
func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) {
pid := p.Pid
cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline")
cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline")
cmdline, err := ioutil.ReadFile(cmdPath)
if err != nil {
return nil, err
@ -702,9 +702,9 @@ func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string
}
// Get IO status from /proc/(pid)/io
func (p *Process) fillFromIOWithContext() (*IOCountersStat, error) {
func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, error) {
pid := p.Pid
ioPath := common.HostProc(strconv.Itoa(int(pid)), "io")
ioPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "io")
ioline, err := ioutil.ReadFile(ioPath)
if err != nil {
return nil, err
@ -738,9 +738,9 @@ func (p *Process) fillFromIOWithContext() (*IOCountersStat, error) {
}
// Get memory info from /proc/(pid)/statm
func (p *Process) fillFromStatmWithContext() (*MemoryInfoStat, *MemoryInfoExStat, error) {
func (p *Process) fillFromStatmWithContext(ctx context.Context) (*MemoryInfoStat, *MemoryInfoExStat, error) {
pid := p.Pid
memPath := common.HostProc(strconv.Itoa(int(pid)), "statm")
memPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "statm")
contents, err := ioutil.ReadFile(memPath)
if err != nil {
return nil, nil, err
@ -791,7 +791,7 @@ func (p *Process) fillFromStatmWithContext() (*MemoryInfoStat, *MemoryInfoExStat
// Get name from /proc/(pid)/comm or /proc/(pid)/status
func (p *Process) fillNameWithContext(ctx context.Context) error {
err := p.fillFromCommWithContext()
err := p.fillFromCommWithContext(ctx)
if err == nil && p.name != "" && len(p.name) < 15 {
return nil
}
@ -799,9 +799,9 @@ func (p *Process) fillNameWithContext(ctx context.Context) error {
}
// Get name from /proc/(pid)/comm
func (p *Process) fillFromCommWithContext() error {
func (p *Process) fillFromCommWithContext(ctx context.Context) error {
pid := p.Pid
statPath := common.HostProc(strconv.Itoa(int(pid)), "comm")
statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "comm")
contents, err := ioutil.ReadFile(statPath)
if err != nil {
return err
@ -818,7 +818,7 @@ func (p *Process) fillFromStatus() error {
func (p *Process) fillFromStatusWithContext(ctx context.Context) error {
pid := p.Pid
statPath := common.HostProc(strconv.Itoa(int(pid)), "status")
statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "status")
contents, err := ioutil.ReadFile(statPath)
if err != nil {
return err
@ -1023,9 +1023,9 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui
var statPath string
if tid == -1 {
statPath = common.HostProc(strconv.Itoa(int(pid)), "stat")
statPath = common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "stat")
} else {
statPath = common.HostProc(strconv.Itoa(int(pid)), "task", strconv.Itoa(int(tid)), "stat")
statPath = common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "task", strconv.Itoa(int(tid)), "stat")
}
contents, err := ioutil.ReadFile(statPath)
@ -1129,7 +1129,7 @@ func (p *Process) fillFromStatWithContext(ctx context.Context) (uint64, int32, *
}
func pidsWithContext(ctx context.Context) ([]int32, error) {
return readPidsFromDir(common.HostProc())
return readPidsFromDir(common.HostProcWithContext(ctx))
}
func ProcessesWithContext(ctx context.Context) ([]*Process, error) {

View File

@ -109,8 +109,8 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) {
return false, err
}
if isMount(common.HostProc()) { // if /<HOST_PROC>/proc exists and is mounted, check if /<HOST_PROC>/proc/<PID> folder exists
_, err := os.Stat(common.HostProc(strconv.Itoa(int(pid))))
if isMount(common.HostProcWithContext(ctx)) { // if /<HOST_PROC>/proc exists and is mounted, check if /<HOST_PROC>/proc/<PID> folder exists
_, err := os.Stat(common.HostProcWithContext(ctx, strconv.Itoa(int(pid))))
if os.IsNotExist(err) {
return false, nil
}

View File

@ -30,7 +30,7 @@ type MemoryMapsStat struct {
type MemoryInfoExStat struct{}
func pidsWithContext(ctx context.Context) ([]int32, error) {
return readPidsFromDir(common.HostProc())
return readPidsFromDir(common.HostProcWithContext(ctx))
}
func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
@ -199,7 +199,7 @@ func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) {
func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []string, error) {
pid := p.Pid
statPath := common.HostProc(strconv.Itoa(int(pid)), "fd")
statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "fd")
d, err := os.Open(statPath)
if err != nil {
return statPath, []string{}, err
@ -211,7 +211,7 @@ func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []stri
func (p *Process) fillFromPathCwdWithContext(ctx context.Context) (string, error) {
pid := p.Pid
cwdPath := common.HostProc(strconv.Itoa(int(pid)), "path", "cwd")
cwdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "path", "cwd")
cwd, err := os.Readlink(cwdPath)
if err != nil {
return "", err
@ -221,7 +221,7 @@ func (p *Process) fillFromPathCwdWithContext(ctx context.Context) (string, error
func (p *Process) fillFromPathAOutWithContext(ctx context.Context) (string, error) {
pid := p.Pid
cwdPath := common.HostProc(strconv.Itoa(int(pid)), "path", "a.out")
cwdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "path", "a.out")
exe, err := os.Readlink(cwdPath)
if err != nil {
return "", err
@ -231,7 +231,7 @@ func (p *Process) fillFromPathAOutWithContext(ctx context.Context) (string, erro
func (p *Process) fillFromExecnameWithContext(ctx context.Context) (string, error) {
pid := p.Pid
execNamePath := common.HostProc(strconv.Itoa(int(pid)), "execname")
execNamePath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "execname")
exe, err := ioutil.ReadFile(execNamePath)
if err != nil {
return "", err
@ -241,7 +241,7 @@ func (p *Process) fillFromExecnameWithContext(ctx context.Context) (string, erro
func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) {
pid := p.Pid
cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline")
cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline")
cmdline, err := ioutil.ReadFile(cmdPath)
if err != nil {
return "", err
@ -258,7 +258,7 @@ func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error
func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) {
pid := p.Pid
cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline")
cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline")
cmdline, err := ioutil.ReadFile(cmdPath)
if err != nil {
return nil, err

31
vendor/github.com/vektah/gqlparser/v2/ast/comment.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
package ast
import (
"strconv"
"strings"
)
type Comment struct {
Value string
Position *Position
}
func (c *Comment) Text() string {
return strings.TrimPrefix(c.Value, "#")
}
type CommentGroup struct {
List []*Comment
}
func (c *CommentGroup) Dump() string {
if len(c.List) == 0 {
return ""
}
var builder strings.Builder
for _, comment := range c.List {
builder.WriteString(comment.Value)
builder.WriteString("\n")
}
return strconv.Quote(builder.String())
}

View File

@ -31,6 +31,10 @@ type Definition struct {
Position *Position `dump:"-"`
BuiltIn bool `dump:"-"`
BeforeDescriptionComment *CommentGroup
AfterDescriptionComment *CommentGroup
EndOfDefinitionComment *CommentGroup
}
func (d *Definition) IsLeafType() bool {
@ -66,6 +70,9 @@ type FieldDefinition struct {
Type *Type
Directives DirectiveList
Position *Position `dump:"-"`
BeforeDescriptionComment *CommentGroup
AfterDescriptionComment *CommentGroup
}
type ArgumentDefinition struct {
@ -75,6 +82,9 @@ type ArgumentDefinition struct {
Type *Type
Directives DirectiveList
Position *Position `dump:"-"`
BeforeDescriptionComment *CommentGroup
AfterDescriptionComment *CommentGroup
}
type EnumValueDefinition struct {
@ -82,6 +92,9 @@ type EnumValueDefinition struct {
Name string
Directives DirectiveList
Position *Position `dump:"-"`
BeforeDescriptionComment *CommentGroup
AfterDescriptionComment *CommentGroup
}
type DirectiveDefinition struct {
@ -91,4 +104,7 @@ type DirectiveDefinition struct {
Locations []DirectiveLocation
IsRepeatable bool
Position *Position `dump:"-"`
BeforeDescriptionComment *CommentGroup
AfterDescriptionComment *CommentGroup
}

View File

@ -4,6 +4,7 @@ type QueryDocument struct {
Operations OperationList
Fragments FragmentDefinitionList
Position *Position `dump:"-"`
Comment *CommentGroup
}
type SchemaDocument struct {
@ -13,6 +14,7 @@ type SchemaDocument struct {
Definitions DefinitionList
Extensions DefinitionList
Position *Position `dump:"-"`
Comment *CommentGroup
}
func (d *SchemaDocument) Merge(other *SchemaDocument) {
@ -35,6 +37,8 @@ type Schema struct {
Implements map[string][]*Definition
Description string
Comment *CommentGroup
}
// AddTypes is the helper to add types definition to the schema
@ -70,10 +74,15 @@ type SchemaDefinition struct {
Directives DirectiveList
OperationTypes OperationTypeDefinitionList
Position *Position `dump:"-"`
BeforeDescriptionComment *CommentGroup
AfterDescriptionComment *CommentGroup
EndOfDefinitionComment *CommentGroup
}
type OperationTypeDefinition struct {
Operation Operation
Type string
Position *Position `dump:"-"`
Comment *CommentGroup
}

View File

@ -9,6 +9,7 @@ type FragmentSpread struct {
Definition *FragmentDefinition
Position *Position `dump:"-"`
Comment *CommentGroup
}
type InlineFragment struct {
@ -20,6 +21,7 @@ type InlineFragment struct {
ObjectDefinition *Definition
Position *Position `dump:"-"`
Comment *CommentGroup
}
type FragmentDefinition struct {
@ -35,4 +37,5 @@ type FragmentDefinition struct {
Definition *Definition
Position *Position `dump:"-"`
Comment *CommentGroup
}

View File

@ -15,6 +15,7 @@ type OperationDefinition struct {
Directives DirectiveList
SelectionSet SelectionSet
Position *Position `dump:"-"`
Comment *CommentGroup
}
type VariableDefinition struct {
@ -23,6 +24,7 @@ type VariableDefinition struct {
DefaultValue *Value
Directives DirectiveList
Position *Position `dump:"-"`
Comment *CommentGroup
// Requires validation
Definition *Definition

View File

@ -22,6 +22,7 @@ type Field struct {
Directives DirectiveList
SelectionSet SelectionSet
Position *Position `dump:"-"`
Comment *CommentGroup
// Require validation
Definition *FieldDefinition
@ -32,6 +33,7 @@ type Argument struct {
Name string
Value *Value
Position *Position `dump:"-"`
Comment *CommentGroup
}
func (s *Field) ArgumentMap(vars map[string]interface{}) map[string]interface{} {

View File

@ -26,6 +26,7 @@ type Value struct {
Children ChildValueList
Kind ValueKind
Position *Position `dump:"-"`
Comment *CommentGroup
// Require validation
Definition *Definition
@ -37,6 +38,7 @@ type ChildValue struct {
Name string
Value *Value
Position *Position `dump:"-"`
Comment *CommentGroup
}
func (v *Value) Value(vars map[string]interface{}) (interface{}, error) {

View File

@ -121,10 +121,7 @@ func (s *Lexer) ReadToken() (token Token, err error) {
case '|':
return s.makeValueToken(Pipe, "")
case '#':
if comment, err := s.readComment(); err != nil {
return comment, err
}
return s.ReadToken()
return s.readComment()
case '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
return s.readName()

View File

@ -26,6 +26,31 @@ simple tokens:
column: 3
value: 'foo'
- name: records line and column with comments
input: "\n\n\n#foo\n #bar\n foo\n"
tokens:
-
kind: COMMENT
start: 3
end: 7
line: 4
column: 0
value: '#foo'
-
kind: COMMENT
start: 10
end: 14
line: 5
column: 3
value: '#bar'
-
kind: NAME
start: 17
end: 20
line: 6
column: 3
value: 'foo'
- name: skips whitespace
input: "\n\n foo\n\n\n"
tokens:
@ -35,15 +60,6 @@ simple tokens:
end: 9
value: 'foo'
- name: skips comments
input: "\n #comment\n foo#comment\n"
tokens:
-
kind: NAME
start: 18
end: 21
value: 'foo'
- name: skips commas
input: ",,,foo,,,"
tokens:
@ -78,6 +94,57 @@ simple tokens:
end: 1
value: a
lexes comments:
- name: basic
input: '#simple'
tokens:
-
kind: COMMENT
start: 0
end: 7
value: '#simple'
- name: two lines
input: "#first\n#second"
tokens:
-
kind: COMMENT
start: 0
end: 6
value: "#first"
-
kind: COMMENT
start: 7
end: 14
value: "#second"
- name: whitespace
input: '# white space '
tokens:
-
kind: COMMENT
start: 0
end: 14
value: '# white space '
- name: not escaped
input: '#not escaped \n\r\b\t\f'
tokens:
-
kind: COMMENT
start: 0
end: 23
value: '#not escaped \n\r\b\t\f'
- name: slashes
input: '#slashes \\ \/'
tokens:
-
kind: COMMENT
start: 0
end: 14
value: '#slashes \\ \/'
lexes strings:
- name: basic
input: '"simple"'

View File

@ -17,6 +17,46 @@ type parser struct {
peekError error
prev lexer.Token
comment *ast.CommentGroup
commentConsuming bool
}
func (p *parser) consumeComment() (*ast.Comment, bool) {
if p.err != nil {
return nil, false
}
tok := p.peek()
if tok.Kind != lexer.Comment {
return nil, false
}
p.next()
return &ast.Comment{
Value: tok.Value,
Position: &tok.Pos,
}, true
}
func (p *parser) consumeCommentGroup() {
if p.err != nil {
return
}
if p.commentConsuming {
return
}
p.commentConsuming = true
var comments []*ast.Comment
for {
comment, ok := p.consumeComment()
if !ok {
break
}
comments = append(comments, comment)
}
p.comment = &ast.CommentGroup{List: comments}
p.commentConsuming = false
}
func (p *parser) peekPos() *ast.Position {
@ -36,6 +76,9 @@ func (p *parser) peek() lexer.Token {
if !p.peeked {
p.peekToken, p.peekError = p.lexer.ReadToken()
p.peeked = true
if p.peekToken.Kind == lexer.Comment {
p.consumeCommentGroup()
}
}
return p.peekToken
@ -54,31 +97,37 @@ func (p *parser) next() lexer.Token {
}
if p.peeked {
p.peeked = false
p.comment = nil
p.prev, p.err = p.peekToken, p.peekError
} else {
p.prev, p.err = p.lexer.ReadToken()
if p.prev.Kind == lexer.Comment {
p.consumeCommentGroup()
}
}
return p.prev
}
func (p *parser) expectKeyword(value string) lexer.Token {
func (p *parser) expectKeyword(value string) (lexer.Token, *ast.CommentGroup) {
tok := p.peek()
comment := p.comment
if tok.Kind == lexer.Name && tok.Value == value {
return p.next()
return p.next(), comment
}
p.error(tok, "Expected %s, found %s", strconv.Quote(value), tok.String())
return tok
return tok, comment
}
func (p *parser) expect(kind lexer.Type) lexer.Token {
func (p *parser) expect(kind lexer.Type) (lexer.Token, *ast.CommentGroup) {
tok := p.peek()
comment := p.comment
if tok.Kind == kind {
return p.next()
return p.next(), comment
}
p.error(tok, "Expected %s, found %s", kind, tok.Kind.String())
return tok
return tok, comment
}
func (p *parser) skip(kind lexer.Type) bool {
@ -115,10 +164,10 @@ func (p *parser) many(start lexer.Type, end lexer.Type, cb func()) {
p.next()
}
func (p *parser) some(start lexer.Type, end lexer.Type, cb func()) {
func (p *parser) some(start lexer.Type, end lexer.Type, cb func()) *ast.CommentGroup {
hasDef := p.skip(start)
if !hasDef {
return
return nil
}
called := false
@ -129,8 +178,10 @@ func (p *parser) some(start lexer.Type, end lexer.Type, cb func()) {
if !called {
p.error(p.peek(), "expected at least one definition, found %s", p.peek().Kind.String())
return
return nil
}
comment := p.comment
p.next()
return comment
}

View File

@ -45,6 +45,7 @@ func (p *parser) parseOperationDefinition() *OperationDefinition {
if p.peek().Kind == lexer.BraceL {
return &OperationDefinition{
Position: p.peekPos(),
Comment: p.comment,
Operation: Query,
SelectionSet: p.parseRequiredSelectionSet(),
}
@ -52,6 +53,7 @@ func (p *parser) parseOperationDefinition() *OperationDefinition {
var od OperationDefinition
od.Position = p.peekPos()
od.Comment = p.comment
od.Operation = p.parseOperationType()
if p.peek().Kind == lexer.Name {
@ -91,6 +93,7 @@ func (p *parser) parseVariableDefinitions() VariableDefinitionList {
func (p *parser) parseVariableDefinition() *VariableDefinition {
var def VariableDefinition
def.Position = p.peekPos()
def.Comment = p.comment
def.Variable = p.parseVariable()
p.expect(lexer.Colon)
@ -144,6 +147,7 @@ func (p *parser) parseSelection() Selection {
func (p *parser) parseField() *Field {
var field Field
field.Position = p.peekPos()
field.Comment = p.comment
field.Alias = p.parseName()
if p.skip(lexer.Colon) {
@ -173,6 +177,7 @@ func (p *parser) parseArguments(isConst bool) ArgumentList {
func (p *parser) parseArgument(isConst bool) *Argument {
arg := Argument{}
arg.Position = p.peekPos()
arg.Comment = p.comment
arg.Name = p.parseName()
p.expect(lexer.Colon)
@ -181,11 +186,12 @@ func (p *parser) parseArgument(isConst bool) *Argument {
}
func (p *parser) parseFragment() Selection {
p.expect(lexer.Spread)
_, comment := p.expect(lexer.Spread)
if peek := p.peek(); peek.Kind == lexer.Name && peek.Value != "on" {
return &FragmentSpread{
Position: p.peekPos(),
Comment: comment,
Name: p.parseFragmentName(),
Directives: p.parseDirectives(false),
}
@ -193,6 +199,7 @@ func (p *parser) parseFragment() Selection {
var def InlineFragment
def.Position = p.peekPos()
def.Comment = comment
if p.peek().Value == "on" {
p.next() // "on"
@ -207,6 +214,7 @@ func (p *parser) parseFragment() Selection {
func (p *parser) parseFragmentDefinition() *FragmentDefinition {
var def FragmentDefinition
def.Position = p.peekPos()
def.Comment = p.comment
p.expectKeyword("fragment")
def.Name = p.parseFragmentName()
@ -243,7 +251,7 @@ func (p *parser) parseValueLiteral(isConst bool) *Value {
p.unexpectedError()
return nil
}
return &Value{Position: &token.Pos, Raw: p.parseVariable(), Kind: Variable}
return &Value{Position: &token.Pos, Comment: p.comment, Raw: p.parseVariable(), Kind: Variable}
case lexer.Int:
kind = IntValue
case lexer.Float:
@ -268,32 +276,35 @@ func (p *parser) parseValueLiteral(isConst bool) *Value {
p.next()
return &Value{Position: &token.Pos, Raw: token.Value, Kind: kind}
return &Value{Position: &token.Pos, Comment: p.comment, Raw: token.Value, Kind: kind}
}
func (p *parser) parseList(isConst bool) *Value {
var values ChildValueList
pos := p.peekPos()
comment := p.comment
p.many(lexer.BracketL, lexer.BracketR, func() {
values = append(values, &ChildValue{Value: p.parseValueLiteral(isConst)})
})
return &Value{Children: values, Kind: ListValue, Position: pos}
return &Value{Children: values, Kind: ListValue, Position: pos, Comment: comment}
}
func (p *parser) parseObject(isConst bool) *Value {
var fields ChildValueList
pos := p.peekPos()
comment := p.comment
p.many(lexer.BraceL, lexer.BraceR, func() {
fields = append(fields, p.parseObjectField(isConst))
})
return &Value{Children: fields, Kind: ObjectValue, Position: pos}
return &Value{Children: fields, Kind: ObjectValue, Position: pos, Comment: comment}
}
func (p *parser) parseObjectField(isConst bool) *ChildValue {
field := ChildValue{}
field.Position = p.peekPos()
field.Comment = p.comment
field.Name = p.parseName()
p.expect(lexer.Colon)
@ -343,7 +354,7 @@ func (p *parser) parseTypeReference() *Type {
}
func (p *parser) parseName() string {
token := p.expect(lexer.Name)
token, _ := p.expect(lexer.Name)
return token.Value
}

View File

@ -436,6 +436,7 @@ large queries:
- <Field>
Alias: "id"
Name: "id"
Comment: "# Copyright (c) 2015-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n"
- <OperationDefinition>
Operation: Operation("mutation")
Name: "likeStory"

View File

@ -45,7 +45,7 @@ func (p *parser) parseSchemaDocument() *SchemaDocument {
return nil
}
var description string
var description descriptionWithComment
if p.peek().Kind == lexer.BlockString || p.peek().Kind == lexer.String {
description = p.parseDescription()
}
@ -63,7 +63,7 @@ func (p *parser) parseSchemaDocument() *SchemaDocument {
case "directive":
doc.Directives = append(doc.Directives, p.parseDirectiveDefinition(description))
case "extend":
if description != "" {
if description.text != "" {
p.unexpectedToken(p.prev)
}
p.parseTypeSystemExtension(&doc)
@ -73,20 +73,26 @@ func (p *parser) parseSchemaDocument() *SchemaDocument {
}
}
// treat end of file comments
doc.Comment = p.comment
return &doc
}
func (p *parser) parseDescription() string {
func (p *parser) parseDescription() descriptionWithComment {
token := p.peek()
var desc descriptionWithComment
if token.Kind != lexer.BlockString && token.Kind != lexer.String {
return ""
return desc
}
return p.next().Value
desc.comment = p.comment
desc.text = p.next().Value
return desc
}
func (p *parser) parseTypeSystemDefinition(description string) *Definition {
func (p *parser) parseTypeSystemDefinition(description descriptionWithComment) *Definition {
tok := p.peek()
if tok.Kind != lexer.Name {
p.unexpectedError()
@ -112,15 +118,17 @@ func (p *parser) parseTypeSystemDefinition(description string) *Definition {
}
}
func (p *parser) parseSchemaDefinition(description string) *SchemaDefinition {
p.expectKeyword("schema")
func (p *parser) parseSchemaDefinition(description descriptionWithComment) *SchemaDefinition {
_, comment := p.expectKeyword("schema")
def := SchemaDefinition{Description: description}
def := SchemaDefinition{}
def.Position = p.peekPos()
def.Description = description
def.BeforeDescriptionComment = description.comment
def.Description = description.text
def.AfterDescriptionComment = comment
def.Directives = p.parseDirectives(true)
p.some(lexer.BraceL, lexer.BraceR, func() {
def.EndOfDefinitionComment = p.some(lexer.BraceL, lexer.BraceR, func() {
def.OperationTypes = append(def.OperationTypes, p.parseOperationTypeDefinition())
})
return &def
@ -129,35 +137,40 @@ func (p *parser) parseSchemaDefinition(description string) *SchemaDefinition {
func (p *parser) parseOperationTypeDefinition() *OperationTypeDefinition {
var op OperationTypeDefinition
op.Position = p.peekPos()
op.Comment = p.comment
op.Operation = p.parseOperationType()
p.expect(lexer.Colon)
op.Type = p.parseName()
return &op
}
func (p *parser) parseScalarTypeDefinition(description string) *Definition {
p.expectKeyword("scalar")
func (p *parser) parseScalarTypeDefinition(description descriptionWithComment) *Definition {
_, comment := p.expectKeyword("scalar")
var def Definition
def.Position = p.peekPos()
def.BeforeDescriptionComment = description.comment
def.Description = description.text
def.AfterDescriptionComment = comment
def.Kind = Scalar
def.Description = description
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
return &def
}
func (p *parser) parseObjectTypeDefinition(description string) *Definition {
p.expectKeyword("type")
func (p *parser) parseObjectTypeDefinition(description descriptionWithComment) *Definition {
_, comment := p.expectKeyword("type")
var def Definition
def.Position = p.peekPos()
def.Kind = Object
def.Description = description
def.BeforeDescriptionComment = description.comment
def.Description = description.text
def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Interfaces = p.parseImplementsInterfaces()
def.Directives = p.parseDirectives(true)
def.Fields = p.parseFieldsDefinition()
def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition()
return &def
}
@ -176,18 +189,26 @@ func (p *parser) parseImplementsInterfaces() []string {
return types
}
func (p *parser) parseFieldsDefinition() FieldList {
func (p *parser) parseFieldsDefinition() (FieldList, *CommentGroup) {
var defs FieldList
p.some(lexer.BraceL, lexer.BraceR, func() {
comment := p.some(lexer.BraceL, lexer.BraceR, func() {
defs = append(defs, p.parseFieldDefinition())
})
return defs
return defs, comment
}
func (p *parser) parseFieldDefinition() *FieldDefinition {
var def FieldDefinition
def.Position = p.peekPos()
def.Description = p.parseDescription()
desc := p.parseDescription()
if desc.text != "" {
def.BeforeDescriptionComment = desc.comment
def.Description = desc.text
}
p.peek() // peek to set p.comment
def.AfterDescriptionComment = p.comment
def.Name = p.parseName()
def.Arguments = p.parseArgumentDefs()
p.expect(lexer.Colon)
@ -208,7 +229,15 @@ func (p *parser) parseArgumentDefs() ArgumentDefinitionList {
func (p *parser) parseArgumentDef() *ArgumentDefinition {
var def ArgumentDefinition
def.Position = p.peekPos()
def.Description = p.parseDescription()
desc := p.parseDescription()
if desc.text != "" {
def.BeforeDescriptionComment = desc.comment
def.Description = desc.text
}
p.peek() // peek to set p.comment
def.AfterDescriptionComment = p.comment
def.Name = p.parseName()
p.expect(lexer.Colon)
def.Type = p.parseTypeReference()
@ -222,7 +251,15 @@ func (p *parser) parseArgumentDef() *ArgumentDefinition {
func (p *parser) parseInputValueDef() *FieldDefinition {
var def FieldDefinition
def.Position = p.peekPos()
def.Description = p.parseDescription()
desc := p.parseDescription()
if desc.text != "" {
def.BeforeDescriptionComment = desc.comment
def.Description = desc.text
}
p.peek() // peek to set p.comment
def.AfterDescriptionComment = p.comment
def.Name = p.parseName()
p.expect(lexer.Colon)
def.Type = p.parseTypeReference()
@ -233,27 +270,31 @@ func (p *parser) parseInputValueDef() *FieldDefinition {
return &def
}
func (p *parser) parseInterfaceTypeDefinition(description string) *Definition {
p.expectKeyword("interface")
func (p *parser) parseInterfaceTypeDefinition(description descriptionWithComment) *Definition {
_, comment := p.expectKeyword("interface")
var def Definition
def.Position = p.peekPos()
def.Kind = Interface
def.Description = description
def.BeforeDescriptionComment = description.comment
def.Description = description.text
def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Interfaces = p.parseImplementsInterfaces()
def.Directives = p.parseDirectives(true)
def.Fields = p.parseFieldsDefinition()
def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition()
return &def
}
func (p *parser) parseUnionTypeDefinition(description string) *Definition {
p.expectKeyword("union")
func (p *parser) parseUnionTypeDefinition(description descriptionWithComment) *Definition {
_, comment := p.expectKeyword("union")
var def Definition
def.Position = p.peekPos()
def.Kind = Union
def.Description = description
def.BeforeDescriptionComment = description.comment
def.Description = description.text
def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
def.Types = p.parseUnionMemberTypes()
@ -274,87 +315,101 @@ func (p *parser) parseUnionMemberTypes() []string {
return types
}
func (p *parser) parseEnumTypeDefinition(description string) *Definition {
p.expectKeyword("enum")
func (p *parser) parseEnumTypeDefinition(description descriptionWithComment) *Definition {
_, comment := p.expectKeyword("enum")
var def Definition
def.Position = p.peekPos()
def.Kind = Enum
def.Description = description
def.BeforeDescriptionComment = description.comment
def.Description = description.text
def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
def.EnumValues = p.parseEnumValuesDefinition()
def.EnumValues, def.EndOfDefinitionComment = p.parseEnumValuesDefinition()
return &def
}
func (p *parser) parseEnumValuesDefinition() EnumValueList {
func (p *parser) parseEnumValuesDefinition() (EnumValueList, *CommentGroup) {
var values EnumValueList
p.some(lexer.BraceL, lexer.BraceR, func() {
comment := p.some(lexer.BraceL, lexer.BraceR, func() {
values = append(values, p.parseEnumValueDefinition())
})
return values
return values, comment
}
func (p *parser) parseEnumValueDefinition() *EnumValueDefinition {
return &EnumValueDefinition{
Position: p.peekPos(),
Description: p.parseDescription(),
Name: p.parseName(),
Directives: p.parseDirectives(true),
var def EnumValueDefinition
def.Position = p.peekPos()
desc := p.parseDescription()
if desc.text != "" {
def.BeforeDescriptionComment = desc.comment
def.Description = desc.text
}
p.peek() // peek to set p.comment
def.AfterDescriptionComment = p.comment
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
return &def
}
func (p *parser) parseInputObjectTypeDefinition(description string) *Definition {
p.expectKeyword("input")
func (p *parser) parseInputObjectTypeDefinition(description descriptionWithComment) *Definition {
_, comment := p.expectKeyword("input")
var def Definition
def.Position = p.peekPos()
def.Kind = InputObject
def.Description = description
def.BeforeDescriptionComment = description.comment
def.Description = description.text
def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
def.Fields = p.parseInputFieldsDefinition()
def.Fields, def.EndOfDefinitionComment = p.parseInputFieldsDefinition()
return &def
}
func (p *parser) parseInputFieldsDefinition() FieldList {
func (p *parser) parseInputFieldsDefinition() (FieldList, *CommentGroup) {
var values FieldList
p.some(lexer.BraceL, lexer.BraceR, func() {
comment := p.some(lexer.BraceL, lexer.BraceR, func() {
values = append(values, p.parseInputValueDef())
})
return values
return values, comment
}
func (p *parser) parseTypeSystemExtension(doc *SchemaDocument) {
p.expectKeyword("extend")
_, comment := p.expectKeyword("extend")
switch p.peek().Value {
case "schema":
doc.SchemaExtension = append(doc.SchemaExtension, p.parseSchemaExtension())
doc.SchemaExtension = append(doc.SchemaExtension, p.parseSchemaExtension(comment))
case "scalar":
doc.Extensions = append(doc.Extensions, p.parseScalarTypeExtension())
doc.Extensions = append(doc.Extensions, p.parseScalarTypeExtension(comment))
case "type":
doc.Extensions = append(doc.Extensions, p.parseObjectTypeExtension())
doc.Extensions = append(doc.Extensions, p.parseObjectTypeExtension(comment))
case "interface":
doc.Extensions = append(doc.Extensions, p.parseInterfaceTypeExtension())
doc.Extensions = append(doc.Extensions, p.parseInterfaceTypeExtension(comment))
case "union":
doc.Extensions = append(doc.Extensions, p.parseUnionTypeExtension())
doc.Extensions = append(doc.Extensions, p.parseUnionTypeExtension(comment))
case "enum":
doc.Extensions = append(doc.Extensions, p.parseEnumTypeExtension())
doc.Extensions = append(doc.Extensions, p.parseEnumTypeExtension(comment))
case "input":
doc.Extensions = append(doc.Extensions, p.parseInputObjectTypeExtension())
doc.Extensions = append(doc.Extensions, p.parseInputObjectTypeExtension(comment))
default:
p.unexpectedError()
}
}
func (p *parser) parseSchemaExtension() *SchemaDefinition {
func (p *parser) parseSchemaExtension(comment *CommentGroup) *SchemaDefinition {
p.expectKeyword("schema")
var def SchemaDefinition
def.Position = p.peekPos()
def.AfterDescriptionComment = comment
def.Directives = p.parseDirectives(true)
p.some(lexer.BraceL, lexer.BraceR, func() {
def.EndOfDefinitionComment = p.some(lexer.BraceL, lexer.BraceR, func() {
def.OperationTypes = append(def.OperationTypes, p.parseOperationTypeDefinition())
})
if len(def.Directives) == 0 && len(def.OperationTypes) == 0 {
@ -363,11 +418,12 @@ func (p *parser) parseSchemaExtension() *SchemaDefinition {
return &def
}
func (p *parser) parseScalarTypeExtension() *Definition {
func (p *parser) parseScalarTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("scalar")
var def Definition
def.Position = p.peekPos()
def.AfterDescriptionComment = comment
def.Kind = Scalar
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
@ -377,42 +433,45 @@ func (p *parser) parseScalarTypeExtension() *Definition {
return &def
}
func (p *parser) parseObjectTypeExtension() *Definition {
func (p *parser) parseObjectTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("type")
var def Definition
def.Position = p.peekPos()
def.AfterDescriptionComment = comment
def.Kind = Object
def.Name = p.parseName()
def.Interfaces = p.parseImplementsInterfaces()
def.Directives = p.parseDirectives(true)
def.Fields = p.parseFieldsDefinition()
def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition()
if len(def.Interfaces) == 0 && len(def.Directives) == 0 && len(def.Fields) == 0 {
p.unexpectedError()
}
return &def
}
func (p *parser) parseInterfaceTypeExtension() *Definition {
func (p *parser) parseInterfaceTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("interface")
var def Definition
def.Position = p.peekPos()
def.AfterDescriptionComment = comment
def.Kind = Interface
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
def.Fields = p.parseFieldsDefinition()
def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition()
if len(def.Directives) == 0 && len(def.Fields) == 0 {
p.unexpectedError()
}
return &def
}
func (p *parser) parseUnionTypeExtension() *Definition {
func (p *parser) parseUnionTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("union")
var def Definition
def.Position = p.peekPos()
def.AfterDescriptionComment = comment
def.Kind = Union
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
@ -424,43 +483,47 @@ func (p *parser) parseUnionTypeExtension() *Definition {
return &def
}
func (p *parser) parseEnumTypeExtension() *Definition {
func (p *parser) parseEnumTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("enum")
var def Definition
def.Position = p.peekPos()
def.AfterDescriptionComment = comment
def.Kind = Enum
def.Name = p.parseName()
def.Directives = p.parseDirectives(true)
def.EnumValues = p.parseEnumValuesDefinition()
def.EnumValues, def.EndOfDefinitionComment = p.parseEnumValuesDefinition()
if len(def.Directives) == 0 && len(def.EnumValues) == 0 {
p.unexpectedError()
}
return &def
}
func (p *parser) parseInputObjectTypeExtension() *Definition {
func (p *parser) parseInputObjectTypeExtension(comment *CommentGroup) *Definition {
p.expectKeyword("input")
var def Definition
def.Position = p.peekPos()
def.AfterDescriptionComment = comment
def.Kind = InputObject
def.Name = p.parseName()
def.Directives = p.parseDirectives(false)
def.Fields = p.parseInputFieldsDefinition()
def.Fields, def.EndOfDefinitionComment = p.parseInputFieldsDefinition()
if len(def.Directives) == 0 && len(def.Fields) == 0 {
p.unexpectedError()
}
return &def
}
func (p *parser) parseDirectiveDefinition(description string) *DirectiveDefinition {
p.expectKeyword("directive")
func (p *parser) parseDirectiveDefinition(description descriptionWithComment) *DirectiveDefinition {
_, comment := p.expectKeyword("directive")
p.expect(lexer.At)
var def DirectiveDefinition
def.Position = p.peekPos()
def.Description = description
def.BeforeDescriptionComment = description.comment
def.Description = description.text
def.AfterDescriptionComment = comment
def.Name = p.parseName()
def.Arguments = p.parseArgumentDefs()
@ -487,7 +550,7 @@ func (p *parser) parseDirectiveLocations() []DirectiveLocation {
}
func (p *parser) parseDirectiveLocation() DirectiveLocation {
name := p.expect(lexer.Name)
name, _ := p.expect(lexer.Name)
switch name.Value {
case `QUERY`:
@ -533,3 +596,8 @@ func (p *parser) parseDirectiveLocation() DirectiveLocation {
p.unexpectedToken(name)
return ""
}
type descriptionWithComment struct {
text string
comment *CommentGroup
}

View File

@ -15,6 +15,67 @@ object types:
Name: "world"
Type: String
- name: with comments
input: |
# Hello
# Hello another
type Hello {
# World
# World another
world: String
# end of type comments
}
# end of file comments
ast: |
<SchemaDocument>
Definitions: [Definition]
- <Definition>
Kind: DefinitionKind("OBJECT")
Name: "Hello"
Fields: [FieldDefinition]
- <FieldDefinition>
Name: "world"
Type: String
AfterDescriptionComment: "# World\n# World another\n"
AfterDescriptionComment: "# Hello\n# Hello another\n"
EndOfDefinitionComment: "# end of type comments\n"
Comment: "# end of file comments\n"
- name: with comments and description
input: |
# Hello
# Hello another
"type description"
# Hello after description
# Hello after description another
type Hello {
# World
# World another
"field description"
# World after description
# World after description another
world: String
# end of definition coments
# end of definition comments another
}
ast: |
<SchemaDocument>
Definitions: [Definition]
- <Definition>
Kind: DefinitionKind("OBJECT")
Description: "type description"
Name: "Hello"
Fields: [FieldDefinition]
- <FieldDefinition>
Description: "field description"
Name: "world"
Type: String
BeforeDescriptionComment: "# World\n# World another\n"
AfterDescriptionComment: "# World after description\n# World after description another\n"
BeforeDescriptionComment: "# Hello\n# Hello another\n"
AfterDescriptionComment: "# Hello after description\n# Hello after description another\n"
EndOfDefinitionComment: "# end of definition coments\n# end of definition comments another\n"
- name: with description
input: |
"Description"
@ -35,6 +96,7 @@ object types:
- name: with block description
input: |
# Before description comment
"""
Description
"""
@ -53,6 +115,8 @@ object types:
- <FieldDefinition>
Name: "world"
Type: String
BeforeDescriptionComment: "# Before description comment\n"
AfterDescriptionComment: "# Even with comments between them\n"
- name: with field arg
input: |
type Hello {
@ -146,8 +210,11 @@ object types:
type extensions:
- name: Object extension
input: |
# comment
extend type Hello {
# comment world
world: String
# end of definition comment
}
ast: |
<SchemaDocument>
@ -159,6 +226,9 @@ type extensions:
- <FieldDefinition>
Name: "world"
Type: String
AfterDescriptionComment: "# comment world\n"
AfterDescriptionComment: "# comment\n"
EndOfDefinitionComment: "# end of definition comment\n"
- name: without any fields
input: "extend type Hello implements Greeting"
@ -277,6 +347,30 @@ schema definition:
Operation: Operation("query")
Type: "Query"
- name: with comments and description
input: |
# before description comment
"description"
# after description comment
schema {
# before field comment
query: Query
# after field comment
}
ast: |
<SchemaDocument>
Schema: [SchemaDefinition]
- <SchemaDefinition>
Description: "description"
OperationTypes: [OperationTypeDefinition]
- <OperationTypeDefinition>
Operation: Operation("query")
Type: "Query"
Comment: "# before field comment\n"
BeforeDescriptionComment: "# before description comment\n"
AfterDescriptionComment: "# after description comment\n"
EndOfDefinitionComment: "# after field comment\n"
schema extensions:
- name: simple
input: |
@ -292,6 +386,26 @@ schema extensions:
Operation: Operation("mutation")
Type: "Mutation"
- name: with comment and description
input: |
# before extend comment
extend schema {
# before field comment
mutation: Mutation
# after field comment
}
ast: |
<SchemaDocument>
SchemaExtension: [SchemaDefinition]
- <SchemaDefinition>
OperationTypes: [OperationTypeDefinition]
- <OperationTypeDefinition>
Operation: Operation("mutation")
Type: "Mutation"
Comment: "# before field comment\n"
AfterDescriptionComment: "# before extend comment\n"
EndOfDefinitionComment: "# after field comment\n"
- name: directive only
input: "extend schema @directive"
ast: |

View File

@ -101,13 +101,19 @@ func (o MarshalOptions) Format(m proto.Message) string {
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(m)
return o.marshal(nil, m)
}
// MarshalAppend appends the textproto format encoding of m to b,
// returning the result.
func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
return o.marshal(b, m)
}
// marshal is a centralized function that all marshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
var delims = [2]byte{'{', '}'}
if o.Multiline && o.Indent == "" {
@ -117,7 +123,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
o.Resolver = protoregistry.GlobalTypes
}
internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII)
internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII)
if err != nil {
return nil, err
}
@ -125,7 +131,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
// Treat nil message interface as an empty message,
// in which case there is nothing to output.
if m == nil {
return []byte{}, nil
return b, nil
}
enc := encoder{internalEnc, o}

View File

@ -53,8 +53,10 @@ type encoderState struct {
// If outputASCII is true, strings will be serialized in such a way that
// multi-byte UTF-8 sequences are escaped. This property ensures that the
// overall output is ASCII (as opposed to UTF-8).
func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) {
e := &Encoder{}
func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) {
e := &Encoder{
encoderState: encoderState{out: buf},
}
if len(indent) > 0 {
if strings.Trim(indent, " \t") != "" {
return nil, errors.New("indent may only be composed of space and tab characters")
@ -195,13 +197,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte {
// WriteInt writes out the given signed integer value.
func (e *Encoder) WriteInt(n int64) {
e.prepareNext(scalar)
e.out = append(e.out, strconv.FormatInt(n, 10)...)
e.out = strconv.AppendInt(e.out, n, 10)
}
// WriteUint writes out the given unsigned integer value.
func (e *Encoder) WriteUint(n uint64) {
e.prepareNext(scalar)
e.out = append(e.out, strconv.FormatUint(n, 10)...)
e.out = strconv.AppendUint(e.out, n, 10)
}
// WriteLiteral writes out the given string as a literal value without quotes.

View File

@ -183,13 +183,58 @@ const (
// Field names for google.protobuf.ExtensionRangeOptions.
const (
ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration"
ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification"
ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option"
ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration"
ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification"
)
// Field numbers for google.protobuf.ExtensionRangeOptions.
const (
ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2
ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3
)
// Full and short names for google.protobuf.ExtensionRangeOptions.VerificationState.
const (
ExtensionRangeOptions_VerificationState_enum_fullname = "google.protobuf.ExtensionRangeOptions.VerificationState"
ExtensionRangeOptions_VerificationState_enum_name = "VerificationState"
)
// Names for google.protobuf.ExtensionRangeOptions.Declaration.
const (
ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration"
ExtensionRangeOptions_Declaration_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration"
)
// Field names for google.protobuf.ExtensionRangeOptions.Declaration.
const (
ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number"
ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name"
ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type"
ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated"
ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved"
ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated"
ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated"
ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
)
// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration.
const (
ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1
ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2
ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3
ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4
ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5
ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.FieldDescriptorProto.
@ -540,6 +585,7 @@ const (
FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
FieldOptions_Retention_field_name protoreflect.Name = "retention"
FieldOptions_Target_field_name protoreflect.Name = "target"
FieldOptions_Targets_field_name protoreflect.Name = "targets"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@ -552,6 +598,7 @@ const (
FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact"
FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention"
FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target"
FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@ -567,6 +614,7 @@ const (
FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16
FieldOptions_Retention_field_number protoreflect.FieldNumber = 17
FieldOptions_Target_field_number protoreflect.FieldNumber = 18
FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)

View File

@ -32,6 +32,7 @@ const (
Type_Options_field_name protoreflect.Name = "options"
Type_SourceContext_field_name protoreflect.Name = "source_context"
Type_Syntax_field_name protoreflect.Name = "syntax"
Type_Edition_field_name protoreflect.Name = "edition"
Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name"
Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields"
@ -39,6 +40,7 @@ const (
Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options"
Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context"
Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax"
Type_Edition_field_fullname protoreflect.FullName = "google.protobuf.Type.edition"
)
// Field numbers for google.protobuf.Type.
@ -49,6 +51,7 @@ const (
Type_Options_field_number protoreflect.FieldNumber = 4
Type_SourceContext_field_number protoreflect.FieldNumber = 5
Type_Syntax_field_number protoreflect.FieldNumber = 6
Type_Edition_field_number protoreflect.FieldNumber = 7
)
// Names for google.protobuf.Field.
@ -121,12 +124,14 @@ const (
Enum_Options_field_name protoreflect.Name = "options"
Enum_SourceContext_field_name protoreflect.Name = "source_context"
Enum_Syntax_field_name protoreflect.Name = "syntax"
Enum_Edition_field_name protoreflect.Name = "edition"
Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name"
Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue"
Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options"
Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context"
Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax"
Enum_Edition_field_fullname protoreflect.FullName = "google.protobuf.Enum.edition"
)
// Field numbers for google.protobuf.Enum.
@ -136,6 +141,7 @@ const (
Enum_Options_field_number protoreflect.FieldNumber = 3
Enum_SourceContext_field_number protoreflect.FieldNumber = 4
Enum_Syntax_field_number protoreflect.FieldNumber = 5
Enum_Edition_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.EnumValue.

View File

@ -33,7 +33,7 @@ var (
return !inOneof(ox) && inOneof(oy)
}
// Fields in disjoint oneof sets are sorted by declaration index.
if ox != nil && oy != nil && ox != oy {
if inOneof(ox) && inOneof(oy) && ox != oy {
return ox.Index() < oy.Index()
}
// Fields sorted by field number.

View File

@ -51,7 +51,7 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
Minor = 30
Minor = 31
Patch = 0
PreRelease = ""
)

View File

@ -73,23 +73,27 @@ func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protore
}
func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
sizeTag := protowire.SizeTag(num)
if fd.IsPacked() && list.Len() > 0 {
content := 0
for i, llen := 0, list.Len(); i < llen; i++ {
content += o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return protowire.SizeTag(num) + protowire.SizeBytes(content)
return sizeTag + protowire.SizeBytes(content)
}
for i, llen := 0, list.Len(); i < llen; i++ {
size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i))
size += sizeTag + o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return size
}
func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
sizeTag := protowire.SizeTag(num)
mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool {
size += protowire.SizeTag(num)
size += sizeTag
size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value))
return true
})

View File

@ -363,6 +363,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte {
b = p.appendSingularField(b, "retention", nil)
case 18:
b = p.appendSingularField(b, "target", nil)
case 19:
b = p.appendRepeatedField(b, "targets", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@ -418,6 +420,10 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte {
switch (*p)[0] {
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
case 2:
b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration)
case 3:
b = p.appendSingularField(b, "verification", nil)
}
return b
}
@ -473,3 +479,24 @@ func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
}
return b
}
func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte {
if len(*p) == 0 {
return b
}
switch (*p)[0] {
case 1:
b = p.appendSingularField(b, "number", nil)
case 2:
b = p.appendSingularField(b, "full_name", nil)
case 3:
b = p.appendSingularField(b, "type", nil)
case 4:
b = p.appendSingularField(b, "is_repeated", nil)
case 5:
b = p.appendSingularField(b, "reserved", nil)
case 6:
b = p.appendSingularField(b, "repeated", nil)
}
return b
}

File diff suppressed because it is too large Load Diff

View File

@ -167,7 +167,7 @@ import (
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
// ) to obtain a formatter capable of generating timestamps in this format.
type Timestamp struct {
state protoimpl.MessageState

21
vendor/modules.txt vendored
View File

@ -1,4 +1,4 @@
# github.com/99designs/gqlgen v0.17.33
# github.com/99designs/gqlgen v0.17.34
## explicit; go 1.18
github.com/99designs/gqlgen
github.com/99designs/gqlgen/api
@ -54,7 +54,7 @@ github.com/beorn7/perks/quantile
# github.com/boltdb/bolt v1.3.1
## explicit
github.com/boltdb/bolt
# github.com/caddyserver/certmagic v0.18.0
# github.com/caddyserver/certmagic v0.18.2
## explicit; go 1.19
github.com/caddyserver/certmagic
# github.com/casbin/casbin/v2 v2.71.1
@ -200,7 +200,7 @@ github.com/hashicorp/go-msgpack/codec
# github.com/hashicorp/golang-lru v0.5.4
## explicit; go 1.12
github.com/hashicorp/golang-lru/simplelru
# github.com/hashicorp/golang-lru/v2 v2.0.3
# github.com/hashicorp/golang-lru/v2 v2.0.4
## explicit; go 1.18
github.com/hashicorp/golang-lru/v2
github.com/hashicorp/golang-lru/v2/simplelru
@ -226,7 +226,7 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
# github.com/klauspost/compress v1.16.6
# github.com/klauspost/compress v1.16.7
## explicit; go 1.18
github.com/klauspost/compress/s2
# github.com/klauspost/cpuid/v2 v2.2.5
@ -272,17 +272,17 @@ github.com/mattn/go-isatty
# github.com/matttproud/golang_protobuf_extensions v1.0.4
## explicit; go 1.9
github.com/matttproud/golang_protobuf_extensions/pbutil
# github.com/mholt/acmez v1.1.1
# github.com/mholt/acmez v1.2.0
## explicit; go 1.20
github.com/mholt/acmez
github.com/mholt/acmez/acme
# github.com/miekg/dns v1.1.54
# github.com/miekg/dns v1.1.55
## explicit; go 1.19
github.com/miekg/dns
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
# github.com/minio/minio-go/v7 v7.0.57
# github.com/minio/minio-go/v7 v7.0.59
## explicit; go 1.17
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/credentials
@ -343,8 +343,9 @@ github.com/rs/xid
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2
# github.com/shirou/gopsutil/v3 v3.23.5
# github.com/shirou/gopsutil/v3 v3.23.6
## explicit; go 1.15
github.com/shirou/gopsutil/v3/common
github.com/shirou/gopsutil/v3/cpu
github.com/shirou/gopsutil/v3/disk
github.com/shirou/gopsutil/v3/internal/common
@ -385,7 +386,7 @@ github.com/valyala/bytebufferpool
# github.com/valyala/fasttemplate v1.2.2
## explicit; go 1.12
github.com/valyala/fasttemplate
# github.com/vektah/gqlparser/v2 v2.5.3
# github.com/vektah/gqlparser/v2 v2.5.6
## explicit; go 1.16
github.com/vektah/gqlparser/v2
github.com/vektah/gqlparser/v2/ast
@ -514,7 +515,7 @@ golang.org/x/tools/internal/pkgbits
golang.org/x/tools/internal/tokeninternal
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
# google.golang.org/protobuf v1.30.0
# google.golang.org/protobuf v1.31.0
## explicit; go 1.11
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire