Update dependencies

Ingo Oppermann 2023-07-17 17:00:42 +02:00
parent 0519059f3d
commit cd31893286
90 changed files with 10830 additions and 364 deletions


@ -195,7 +195,6 @@ func (m *manager) AcquireCertificates(ctx context.Context, hostnames []string) e
if len(removed) != 0 {
m.logger.WithField("hostnames", removed).Info().Log("Unmanage certificates")
m.config.Unmanage(removed)
}
m.lock.Lock()
@ -216,7 +215,6 @@ func (m *manager) ManageCertificates(ctx context.Context, hostnames []string) er
if len(removed) != 0 {
m.logger.WithField("hostnames", removed).Info().Log("Unmanage certificates")
m.config.Unmanage(removed)
}
if len(added) == 0 {

go.mod

@ -3,13 +3,13 @@ module github.com/datarhei/core/v16
go 1.18
require (
github.com/99designs/gqlgen v0.17.34
github.com/99designs/gqlgen v0.17.35
github.com/Masterminds/semver/v3 v3.2.1
github.com/adhocore/gronx v1.6.3
github.com/atrox/haikunatorgo/v2 v2.0.1
github.com/caddyserver/certmagic v0.18.2
github.com/casbin/casbin/v2 v2.71.1
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230710090938-bfcb7f5f7b3e
github.com/caddyserver/certmagic v0.19.0
github.com/casbin/casbin/v2 v2.72.0
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230717141633-8f0e5ce4c68c
github.com/datarhei/gosrt v0.5.2
github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a
github.com/fujiwara/shapeio v1.0.0
@ -24,18 +24,18 @@ require (
github.com/invopop/jsonschema v0.4.0
github.com/joho/godotenv v1.5.1
github.com/klauspost/cpuid/v2 v2.2.5
github.com/labstack/echo/v4 v4.10.2
github.com/labstack/echo/v4 v4.11.1
github.com/lestrrat-go/strftime v1.0.6
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/mattn/go-isatty v0.0.19
github.com/minio/minio-go/v7 v7.0.59
github.com/minio/minio-go/v7 v7.0.60
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
github.com/prometheus/client_golang v1.16.0
github.com/shirou/gopsutil/v3 v3.23.6
github.com/stretchr/testify v1.8.4
github.com/swaggo/echo-swagger v1.4.0
github.com/swaggo/swag v1.16.1
github.com/vektah/gqlparser/v2 v2.5.6
github.com/vektah/gqlparser/v2 v2.5.8
github.com/xeipuuv/gojsonschema v1.2.0
go.etcd.io/bbolt v1.3.7
go.uber.org/automaxprocs v1.5.2
@ -61,7 +61,7 @@ require (
github.com/fatih/color v1.15.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
@ -103,6 +103,9 @@ require (
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/swaggo/files/v2 v2.0.0 // indirect
github.com/tidwall/gjson v1.14.4 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/urfave/cli/v2 v2.25.5 // indirect
@ -112,6 +115,7 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
github.com/zeebo/blake3 v0.2.3 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.1.12 // indirect
go.uber.org/multierr v1.11.0 // indirect

go.sum

@ -1,5 +1,5 @@
github.com/99designs/gqlgen v0.17.34 h1:5cS5/OKFguQt+Ws56uj9FlG2xm1IlcJWNF2jrMIKYFQ=
github.com/99designs/gqlgen v0.17.34/go.mod h1:Axcd3jIFHBVcqzixujJQr1wGqE+lGTpz6u4iZBZg1G8=
github.com/99designs/gqlgen v0.17.35 h1:r0KF1xL3cPMyUArNWeC3e2Ckuc4iiLm7bj5xzYZQYbQ=
github.com/99designs/gqlgen v0.17.35/go.mod h1:Vlf7TeY3ZdVI9SagB5IZE8CYhpq8kJPCVPJ7MrlVoX0=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw=
@ -34,10 +34,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/caddyserver/certmagic v0.18.2 h1:Nj2+M+A2Ho9IF6n1wUSbra4mX1X6ALzWpul9HooprHA=
github.com/caddyserver/certmagic v0.18.2/go.mod h1:cLsgYXecH1iVUPjDXw15/1SKjZk/TK+aFfQk5FnugGQ=
github.com/casbin/casbin/v2 v2.71.1 h1:LRHyqM0S1LzM/K59PmfUIN0ZJfLgcOjL4OhOQI/FNXU=
github.com/casbin/casbin/v2 v2.71.1/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
github.com/caddyserver/certmagic v0.19.0 h1:HuJ1Yf1H1jAfmBGrSSQN1XRkafnWcpDtyIiyMV6vmpM=
github.com/caddyserver/certmagic v0.19.0/go.mod h1:fsL01NomQ6N+kE2j37ZCnig2MFosG+MIO4ztnmG/zz8=
github.com/casbin/casbin/v2 v2.72.0 h1:Lzp1h4rfQzjzN8N6FfaDDsLdhmZBqQot2Wc/Rnp8Eis=
github.com/casbin/casbin/v2 v2.72.0/go.mod h1:mzGx0hYW9/ksOSpw3wNjk3NRAroq5VMFYUQ6G43iGPk=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@ -46,8 +46,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230710090938-bfcb7f5f7b3e h1:PUBHatfuW/qclTFQ062QtxlDEsqH3HlIjqI3vUOKR3c=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230710090938-bfcb7f5f7b3e/go.mod h1:3eKfwhPKoW7faTn+luShRVNMqcIskvlIKjRJ7ShjyL8=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230717141633-8f0e5ce4c68c h1:VECuOSlBtcikfAkb00DFhxKXeJzpMpeUVEZIJRnpEDE=
github.com/datarhei/core-client-go/v16 v16.11.1-0.20230717141633-8f0e5ce4c68c/go.mod h1:3eKfwhPKoW7faTn+luShRVNMqcIskvlIKjRJ7ShjyL8=
github.com/datarhei/gosrt v0.5.2 h1:eagqZwEIiGPNJW0rLep3gwceObyaZ17+iKRc+l4VEpc=
github.com/datarhei/gosrt v0.5.2/go.mod h1:0308GQhAu5hxe2KYdbss901aKceSSKXnwCr8Vs++eiw=
github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a h1:Tf4DSHY1xruBglr+yYP5Wct7czM86GKMYgbXH8a7OFo=
@ -75,8 +75,9 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
@ -164,6 +165,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -175,8 +177,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/labstack/echo/v4 v4.10.2 h1:n1jAhnq/elIFTHr1EYpiYtyKgx4RW9ccVgkqByZaN2M=
github.com/labstack/echo/v4 v4.10.2/go.mod h1:OEyqf2//K1DFdE57vw2DRgWY0M7s65IVQO2FzvI4J5k=
github.com/labstack/echo/v4 v4.11.1 h1:dEpLU2FLg4UVmvCGPuk/APjlH6GDpbEPti61srUUUs4=
github.com/labstack/echo/v4 v4.11.1/go.mod h1:YuYRTSM3CHs2ybfrL8Px48bO6BAnYIN4l8wSTMP6BDQ=
github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8=
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
@ -216,8 +218,8 @@ github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.59 h1:lxIXwsTIcQkYoEG25rUJbzpmSB/oWeVDmxFo/uWUUsw=
github.com/minio/minio-go/v7 v7.0.59/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
github.com/minio/minio-go/v7 v7.0.60 h1:iHkrmWyHFs/eZiWc2F/5jAHtNBAFy+HjdhMX6FkkPWc=
github.com/minio/minio-go/v7 v7.0.60/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@ -308,6 +310,13 @@ github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw
github.com/swaggo/files/v2 v2.0.0/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM=
github.com/swaggo/swag v1.16.1 h1:fTNRhKstPKxcnoKsytm4sahr8FaYzUcT7i1/3nd/fBg=
github.com/swaggo/swag v1.16.1/go.mod h1:9/LMvHycG3NFHfR6LwvikHv5iFvmPADQ359cKikGxto=
github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=
github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
@ -321,8 +330,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vektah/gqlparser/v2 v2.5.6 h1:Ou14T0N1s191eRMZ1gARVqohcbe1e8FrcONScsq8cRU=
github.com/vektah/gqlparser/v2 v2.5.6/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME=
github.com/vektah/gqlparser/v2 v2.5.8 h1:pm6WOnGdzFOCfcQo9L3+xzW51mKrlwTEg4Wr7AH1JW4=
github.com/vektah/gqlparser/v2 v2.5.8/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@ -335,6 +344,12 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=


@ -5,10 +5,144 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
<a name="unreleased"></a>
## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.32...HEAD)
## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.34...HEAD)
<!-- end of if -->
<!-- end of CommitGroups -->
<a name="v0.17.34"></a>
## [v0.17.34](https://github.com/99designs/gqlgen/compare/v0.17.33...v0.17.34) - 2023-06-23
- <a href="https://github.com/99designs/gqlgen/commit/5a70585758275a186b3e76e729644cb0c3e4bd37"><tt>5a705857</tt></a> release v0.17.34
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/1a9dbadd3e1a803e89ff9c6103ba4d9bbcf38029"><tt>1a9dbadd</tt></a> Use "No longer supported" as the default deprecationReason for deprecations without a reason specified (<a href="https://github.com/99designs/gqlgen/pull/2692">#2692</a>)</summary>
* fix: use "No longer supported" as the default deprecationReason for deprecated fields with no reason specified
* test: add integration tests to ensure deprecated fields with no reason set get the default reason defined in the spec `No longer supported`
</details></dd></dl>
- <a href="https://github.com/99designs/gqlgen/commit/640f383651dee93b64a5cd72a70af4c5dce0831d"><tt>640f3836</tt></a> Update gqlparser dependency (<a href="https://github.com/99designs/gqlgen/pull/2694">#2694</a>)
- <a href="https://github.com/99designs/gqlgen/commit/5ac9fe5945a59cf7ce6370da4dd5dbc6a8d098ff"><tt>5ac9fe59</tt></a> Added flag to omit interface checks (<a href="https://github.com/99designs/gqlgen/pull/2689">#2689</a>)
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/abc3c627d6dfc9f70e22f40f2457b002310a84c7"><tt>abc3c627</tt></a> feat: always use latest apollo sandbox (<a href="https://github.com/99designs/gqlgen/pull/2686">#2686</a>)</summary>
* feat: removeDuplicateTags() validates tags and panic with meaningful error message
* Instead of pinning on _latest without subresource integrity check, update both url and integrity to latest
* Update graphql/playground/apollo_sandbox_playground.go
---------
</details></dd></dl>
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/3b295bb4a70486a16e82877422a7b7c55d21f4f9"><tt>3b295bb4</tt></a> added GoInitialismsConfig which overrides the initialisms to be regarded (<a href="https://github.com/99designs/gqlgen/pull/2683">#2683</a>)</summary>
* added GoInitialismsConfig which overrides the initialisms to be regarded
* typo
* adjusted examples and documentation
* removed test with side-effects, adjustend yaml indentations, changed example entry "ID" to "CC" (again? I though I already did that)
* comply with linter
</details></dd></dl>
- <a href="https://github.com/99designs/gqlgen/commit/ee6add4bddd7ee05e31a37815f281c320eb44e26"><tt>ee6add4b</tt></a> Refactor TypeIdentifier to avoid circular imports (<a href="https://github.com/99designs/gqlgen/pull/2682">#2682</a>)
- <a href="https://github.com/99designs/gqlgen/commit/44376e52e7857a6b7e16d40088d57234c8eeaf64"><tt>44376e52</tt></a> fix subscription example in documentation (<a href="https://github.com/99designs/gqlgen/pull/2677">#2677</a>)
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/d508082896d5ab6de6ea40b42478e54dc2feb74c"><tt>d5080828</tt></a> Reworked integration testing using vitest (<a href="https://github.com/99designs/gqlgen/pull/2675">#2675</a>)</summary>
* Reworked integration using vitest
Added SSE client testing
Fixed SSE Transport parse errors not being sent as event-stream
* Added defer testing using urql
* Cleanup unnecessary dependencies
</details></dd></dl>
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/d16f498f5b3659bfe23749ee78993b816df04322"><tt>d16f498f</tt></a> fix: issue with extraFields being thrown away (<a href="https://github.com/99designs/gqlgen/pull/2674">#2674</a>)</summary>
* fix: issue with extraFields being thrown away
* Go fumpt on file
---------
</details></dd></dl>
- <a href="https://github.com/99designs/gqlgen/commit/71d16aa052167358d95a4dd2ae89806b636b9a1d"><tt>71d16aa0</tt></a> v0.17.33 postrelease bump
<!-- end of Commits -->
<!-- end of Else -->
<!-- end of If NoteGroups -->
<a name="v0.17.33"></a>
## [v0.17.33](https://github.com/99designs/gqlgen/compare/v0.17.32...v0.17.33) - 2023-06-13
- <a href="https://github.com/99designs/gqlgen/commit/a1e34ca0f6c1f4fdbb113485187eeef77fb8cc1f"><tt>a1e34ca0</tt></a> release v0.17.33
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/790a72c1642200c6530da34ae2c094a088ae1dfe"><tt>790a72c1</tt></a> issue-1372: add custom decode func (<a href="https://github.com/99designs/gqlgen/pull/2666">#2666</a>)</summary>
* issue-1372: add custom decode func
* issue-1372: add custom decode method
* issue-1372: fix lint
* issue-1372: add custom decode func
* issue-1372: add custom decode method
* issue-1372: fix lint
* issue-1372: extend functionality by setting up the whole decode config instead of one nested field
* issue-1372: rollback generated.go file
* issue-1372: fix lint
</details></dd></dl>
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/c63c60eb910b532c10677caac42b78c02735f4e2"><tt>c63c60eb</tt></a> Update all modules (<a href="https://github.com/99designs/gqlgen/pull/2667">#2667</a>)</summary>
* Update all modules
* Add gqlparser v2.5.3
---------
</details></dd></dl>
<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/4a78eb0c9be84793df821616b4ebfa4bfb42a49c"><tt>4a78eb0c</tt></a> minor cleaning: fix some stricter lint rule warnings (<a href="https://github.com/99designs/gqlgen/pull/2665">#2665</a>)</summary>
* Add Changelog notes
* Some spring cleaning
* Update golangci-lint to latest
---------
</details></dd></dl>
- <a href="https://github.com/99designs/gqlgen/commit/1e925f7e5b13566c9f7cdd115e59d39f0df0e4f3"><tt>1e925f7e</tt></a> v0.17.32 postrelease bump
<!-- end of Commits -->
<!-- end of Else -->
<!-- end of If NoteGroups -->
<a name="v0.17.32"></a>
## [v0.17.32](https://github.com/99designs/gqlgen/compare/v0.17.31...v0.17.32) - 2023-06-06
- <a href="https://github.com/99designs/gqlgen/commit/3a81a78bb7370f067c6bf4f3ce79de0e77f885a1"><tt>3a81a78b</tt></a> release v0.17.32


@ -53,7 +53,7 @@ func ApolloSandboxHandler(title, endpoint string) http.HandlerFunc {
"title": title,
"endpoint": endpoint,
"endpointIsAbsolute": endpointHasScheme(endpoint),
"mainSRI": "sha256-/ldbSJ7EovavF815TfCN50qKB9AMvzskb9xiG71bmg2I=",
"mainSRI": "sha256-ldbSJ7EovavF815TfCN50qKB9AMvzskb9xiG71bmg2I=",
})
if err != nil {
panic(err)

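The `mainSRI` fix above drops a stray slash from the Apollo Sandbox subresource integrity value, and the GraphiQL section below refreshes the SRI hashes for the React 18 and GraphiQL 3.0.1 assets. For reference, an SRI value of this form is just the base64-encoded SHA-256 digest of the asset with a `sha256-` prefix. A minimal sketch of computing one in Go — the URL is only an example, and this is not the tooling gqlgen itself uses:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"net/http"
)

// sriSHA256 downloads a resource and returns its subresource integrity
// value in the "sha256-<base64 digest>" form used by the playground templates.
func sriSHA256(url string) (string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	h := sha256.New()
	if _, err := io.Copy(h, resp.Body); err != nil {
		return "", err
	}
	return "sha256-" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}

func main() {
	// Example asset URL; substitute the CDN file referenced by the template.
	sri, err := sriSHA256("https://cdn.jsdelivr.net/npm/react@18.2.0/umd/react.production.min.js")
	if err != nil {
		panic(err)
	}
	fmt.Println(sri)
}
```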

@ -24,12 +24,12 @@ var page = template.Must(template.New("graphiql").Parse(`<!DOCTYPE html>
}
</style>
<script
src="https://cdn.jsdelivr.net/npm/react@17.0.2/umd/react.production.min.js"
src="https://cdn.jsdelivr.net/npm/react@18.2.0/umd/react.production.min.js"
integrity="{{.reactSRI}}"
crossorigin="anonymous"
></script>
<script
src="https://cdn.jsdelivr.net/npm/react-dom@17.0.2/umd/react-dom.production.min.js"
src="https://cdn.jsdelivr.net/npm/react-dom@18.2.0/umd/react-dom.production.min.js"
integrity="{{.reactDOMSRI}}"
crossorigin="anonymous"
></script>
@ -82,11 +82,11 @@ func Handler(title string, endpoint string) http.HandlerFunc {
"endpoint": endpoint,
"endpointIsAbsolute": endpointHasScheme(endpoint),
"subscriptionEndpoint": getSubscriptionEndpoint(endpoint),
"version": "2.0.7",
"cssSRI": "sha256-gQryfbGYeYFxnJYnfPStPYFt0+uv8RP8Dm++eh00G9c=",
"jsSRI": "sha256-qQ6pw7LwTLC+GfzN+cJsYXfVWRKH9O5o7+5H96gTJhQ=",
"reactSRI": "sha256-Ipu/TQ50iCCVZBUsZyNJfxrDk0E2yhaEIz0vqI+kFG8=",
"reactDOMSRI": "sha256-nbMykgB6tsOFJ7OdVmPpdqMFVk4ZsqWocT6issAPUF0=",
"version": "3.0.1",
"cssSRI": "sha256-wTzfn13a+pLMB5rMeysPPR1hO7x0SwSeQI+cnw7VdbE=",
"jsSRI": "sha256-dLnxjV+d2rFUCtYKjbPy413/8O+Ahy7QqAhaPNlL8fk=",
"reactSRI": "sha256-S0lp+k7zWUMk2ixteM6HZvu8L9Eh//OVrt+ZfbCpmgY=",
"reactDOMSRI": "sha256-IXWO0ITNDjfnNXIu5POVfqlgYoop36bDzhodR6LW5Pc=",
})
if err != nil {
panic(err)


@ -1,3 +1,3 @@
package graphql
const Version = "v0.17.34"
const Version = "v0.17.35"


@ -144,7 +144,7 @@ func (f *federation) InjectSourceEarly() *ast.Source {
}
}
// InjectSources creates a GraphQL Entity type with all
// InjectSourceLate creates a GraphQL Entity type with all
// the fields that had the @key directive
func (f *federation) InjectSourceLate(schema *ast.Schema) *ast.Source {
f.setEntities(schema)


@ -249,7 +249,7 @@ cache = certmagic.NewCache(certmagic.CacheOptions{
// Here we use New to get a valid Config associated with the same cache.
// The provided Config is used as a template and will be completed with
// any defaults that are set in the Default config.
return certmagic.New(cache, &certmagic.config{
return certmagic.New(cache, certmagic.Config{
// ...
}), nil
},
@ -267,7 +267,7 @@ myACME := certmagic.NewACMEIssuer(magic, certmagic.ACMEIssuer{
// plus any other customizations you need
})
magic.Issuer = myACME
magic.Issuers = []certmagic.Issuer{myACME}
// this obtains certificates or renews them if necessary
err := magic.ManageSync(context.TODO(), []string{"example.com", "sub.example.com"})

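The README hunk above corrects the low-level example for the current certmagic API: the config template is the exported `certmagic.Config` type (not `certmagic.config`), and issuers are assigned to the `Issuers` slice rather than a singular `Issuer` field. A compact sketch of the corrected pattern, assuming the Let's Encrypt staging CA and a placeholder email:

```go
package main

import (
	"context"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	var cache *certmagic.Cache
	cache = certmagic.NewCache(certmagic.CacheOptions{
		GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
			// The provided Config is a template; defaults fill in the rest.
			return certmagic.New(cache, certmagic.Config{}), nil
		},
	})

	magic := certmagic.New(cache, certmagic.Config{})

	myACME := certmagic.NewACMEIssuer(magic, certmagic.ACMEIssuer{
		CA:     certmagic.LetsEncryptStagingCA,
		Email:  "you@example.com", // placeholder
		Agreed: true,
	})
	magic.Issuers = []certmagic.Issuer{myACME}

	// Obtains certificates or renews them if necessary.
	if err := magic.ManageSync(context.TODO(), []string{"example.com"}); err != nil {
		log.Fatal(err)
	}
}
```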

@ -299,7 +299,7 @@ func (iss *ACMEIssuer) isAgreed() bool {
// batch is eligible for certificates if using Let's Encrypt.
// It also ensures that an email address is available.
func (am *ACMEIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error {
publicCA := strings.Contains(am.CA, "api.letsencrypt.org") || strings.Contains(am.CA, "acme.zerossl.com")
publicCA := strings.Contains(am.CA, "api.letsencrypt.org") || strings.Contains(am.CA, "acme.zerossl.com") || strings.Contains(am.CA, "api.pki.goog")
if publicCA {
for _, name := range names {
if !SubjectQualifiesForPublicCert(name) {


@ -48,7 +48,8 @@ import (
// differently.
type Cache struct {
// User configuration of the cache
options CacheOptions
options CacheOptions
optionsMu sync.RWMutex
// The cache is keyed by certificate hash
cache map[string]Certificate
@ -56,7 +57,7 @@ type Cache struct {
// cacheIndex is a map of SAN to cache key (cert hash)
cacheIndex map[string][]string
// Protects the cache and index maps
// Protects the cache and cacheIndex maps
mu sync.RWMutex
// Close this channel to cancel asset maintenance
@ -128,6 +129,12 @@ func NewCache(opts CacheOptions) *Cache {
return c
}
func (certCache *Cache) SetOptions(opts CacheOptions) {
certCache.optionsMu.Lock()
certCache.options = opts
certCache.optionsMu.Unlock()
}
// Stop stops the maintenance goroutine for
// certificates in certCache. It blocks until
// stopping is complete. Once a cache is
@ -226,7 +233,11 @@ func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
// if the cache is at capacity, make room for new cert
cacheSize := len(certCache.cache)
if certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity {
certCache.optionsMu.RLock()
atCapacity := certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity
certCache.optionsMu.RUnlock()
if atCapacity {
// Go maps are "nondeterministic" but not actually random,
// so although we could just chop off the "front" of the
// map with less code, that is a heavily skewed eviction
@ -256,6 +267,7 @@ func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
certCache.cacheIndex[name] = append(certCache.cacheIndex[name], cert.hash)
}
certCache.optionsMu.RLock()
certCache.logger.Debug("added certificate to cache",
zap.Strings("subjects", cert.Names),
zap.Time("expiration", expiresAt(cert.Leaf)),
@ -264,6 +276,7 @@ func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
zap.String("hash", cert.hash),
zap.Int("cache_size", len(certCache.cache)),
zap.Int("cache_capacity", certCache.options.Capacity))
certCache.optionsMu.RUnlock()
}
// removeCertificate removes cert from the cache.
@ -290,6 +303,7 @@ func (certCache *Cache) removeCertificate(cert Certificate) {
// delete the actual cert from the cache
delete(certCache.cache, cert.hash)
certCache.optionsMu.RLock()
certCache.logger.Debug("removed certificate from cache",
zap.Strings("subjects", cert.Names),
zap.Time("expiration", expiresAt(cert.Leaf)),
@ -298,6 +312,7 @@ func (certCache *Cache) removeCertificate(cert Certificate) {
zap.String("hash", cert.hash),
zap.Int("cache_size", len(certCache.cache)),
zap.Int("cache_capacity", certCache.options.Capacity))
certCache.optionsMu.RUnlock()
}
// replaceCertificate atomically replaces oldCert with newCert in
@ -314,11 +329,13 @@ func (certCache *Cache) replaceCertificate(oldCert, newCert Certificate) {
zap.Time("new_expiration", expiresAt(newCert.Leaf)))
}
func (certCache *Cache) getAllMatchingCerts(name string) []Certificate {
// getAllMatchingCerts returns all certificates with exactly this subject
// (wildcards are NOT expanded).
func (certCache *Cache) getAllMatchingCerts(subject string) []Certificate {
certCache.mu.RLock()
defer certCache.mu.RUnlock()
allCertKeys := certCache.cacheIndex[name]
allCertKeys := certCache.cacheIndex[subject]
certs := make([]Certificate, len(allCertKeys))
for i := range allCertKeys {
@ -339,7 +356,11 @@ func (certCache *Cache) getAllCerts() []Certificate {
}
func (certCache *Cache) getConfig(cert Certificate) (*Config, error) {
cfg, err := certCache.options.GetConfigForCert(cert)
certCache.optionsMu.RLock()
getCert := certCache.options.GetConfigForCert
certCache.optionsMu.RUnlock()
cfg, err := getCert(cert)
if err != nil {
return nil, err
}
@ -373,6 +394,33 @@ func (certCache *Cache) AllMatchingCertificates(name string) []Certificate {
return certs
}
// RemoveManaged removes managed certificates for the given subjects from the cache.
// This effectively stops maintenance of those certificates.
func (certCache *Cache) RemoveManaged(subjects []string) {
deleteQueue := make([]string, 0, len(subjects))
for _, subject := range subjects {
certs := certCache.getAllMatchingCerts(subject) // does NOT expand wildcards; exact matches only
for _, cert := range certs {
if !cert.managed {
continue
}
deleteQueue = append(deleteQueue, cert.hash)
}
}
certCache.Remove(deleteQueue)
}
// Remove removes certificates with the given hashes from the cache.
// This is effectively used to unload manually-loaded certificates.
func (certCache *Cache) Remove(hashes []string) {
certCache.mu.Lock()
for _, h := range hashes {
cert := certCache.cache[h]
certCache.removeCertificate(cert)
}
certCache.mu.Unlock()
}
var (
defaultCache *Cache
defaultCacheMu sync.Mutex

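certmagic v0.19.0 adds `SetOptions`, `RemoveManaged`, and `Remove` to the cache (and guards option reads with `optionsMu`); eviction of managed certificates now happens on the `Cache` rather than via `(*Config).Unmanage`, which is removed further down in this commit. A minimal sketch of the replacement call, assuming `cache` is the shared `*certmagic.Cache`; the package name is illustrative:

```go
package tlsmanager // illustrative package name

import "github.com/caddyserver/certmagic"

// UnmanageHostnames stops maintenance for the given hostnames by evicting
// their managed certificates from the shared cache. In certmagic v0.19.0
// this replaces the removed (*Config).Unmanage.
func UnmanageHostnames(cache *certmagic.Cache, hostnames []string) {
	// Subjects are matched exactly; wildcard names are not expanded.
	cache.RemoveManaged(hostnames)
}
```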

@ -48,7 +48,7 @@ type Certificate struct {
// most recent OCSP response we have for this certificate.
ocsp *ocsp.Response
// The hex-encoded hash of this cert's chain's bytes.
// The hex-encoded hash of this cert's chain's DER bytes.
hash string
// Whether this certificate is under our management.
@ -64,6 +64,9 @@ func (cert Certificate) Empty() bool {
return len(cert.Certificate.Certificate) == 0
}
// Hash returns a checksum of the certificate chain's DER-encoded bytes.
func (cert Certificate) Hash() string { return cert.hash }
// NeedsRenewal returns true if the certificate is
// expiring soon (according to cfg) or has expired.
func (cert Certificate) NeedsRenewal(cfg *Config) bool {
@ -155,29 +158,32 @@ func (cfg *Config) loadManagedCertificate(ctx context.Context, domain string) (C
// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile
// and keyFile, which must be in PEM format. It stores the certificate in
// the in-memory cache.
// the in-memory cache and returns the hash, useful for removing from the cache.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedCertificatePEMFile(ctx context.Context, certFile, keyFile string, tags []string) error {
func (cfg *Config) CacheUnmanagedCertificatePEMFile(ctx context.Context, certFile, keyFile string, tags []string) (string, error) {
cert, err := cfg.makeCertificateFromDiskWithOCSP(ctx, cfg.Storage, certFile, keyFile)
if err != nil {
return err
return "", err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit(ctx, "cached_unmanaged_cert", map[string]any{"sans": cert.Names})
return nil
return cert.hash, nil
}
// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache.
// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache
//
// and returns the hash, useful for removing from the cache.
//
// It staples OCSP if possible.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedTLSCertificate(ctx context.Context, tlsCert tls.Certificate, tags []string) error {
func (cfg *Config) CacheUnmanagedTLSCertificate(ctx context.Context, tlsCert tls.Certificate, tags []string) (string, error) {
var cert Certificate
err := fillCertFromLeaf(&cert, tlsCert)
if err != nil {
return err
return "", err
}
err = stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, nil)
if err != nil {
@ -186,22 +192,23 @@ func (cfg *Config) CacheUnmanagedTLSCertificate(ctx context.Context, tlsCert tls
cfg.emit(ctx, "cached_unmanaged_cert", map[string]any{"sans": cert.Names})
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
return nil
return cert.hash, nil
}
// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes
// of the certificate and key, then caches it in memory.
// of the certificate and key, then caches it in memory, and returns the hash,
// which is useful for removing from the cache.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedCertificatePEMBytes(ctx context.Context, certBytes, keyBytes []byte, tags []string) error {
func (cfg *Config) CacheUnmanagedCertificatePEMBytes(ctx context.Context, certBytes, keyBytes []byte, tags []string) (string, error) {
cert, err := cfg.makeCertificateWithOCSP(ctx, certBytes, keyBytes)
if err != nil {
return err
return "", err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit(ctx, "cached_unmanaged_cert", map[string]any{"sans": cert.Names})
return nil
return cert.hash, nil
}
// makeCertificateFromDiskWithOCSP makes a Certificate by loading the

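With v0.19.0 the `CacheUnmanaged*` helpers return the certificate hash so a caller can later evict exactly that certificate with `Cache.Remove`. A sketch of the new signature in use, with placeholder PEM paths:

```go
package main

import (
	"context"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	cache := certmagic.NewCache(certmagic.CacheOptions{
		GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
			return certmagic.NewDefault(), nil
		},
	})
	cfg := certmagic.New(cache, certmagic.Config{})

	// Paths are placeholders; the changed signature now returns the cert's hash.
	hash, err := cfg.CacheUnmanagedCertificatePEMFile(context.TODO(), "cert.pem", "key.pem", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Later, that hash unloads exactly this certificate from the cache.
	cache.Remove([]string{hash})
}
```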

@ -294,17 +294,12 @@ type OnDemandConfig struct {
// that allows the same names it already passed
// into Manage) and without letting clients have
// their run of any domain names they want.
// Only enforced if len > 0.
hostAllowlist []string
}
func (o *OnDemandConfig) allowlistContains(name string) bool {
for _, n := range o.hostAllowlist {
if strings.EqualFold(n, name) {
return true
}
}
return false
// Only enforced if len > 0. (This is a map to
// avoid O(n^2) performance; when it was a slice,
// we saw a 30s CPU profile for a config managing
// 110K names where 29s was spent checking for
// duplicates. Order is not important here.)
hostAllowlist map[string]struct{}
}
// isLoopback returns true if the hostname of addr looks

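The on-demand allowlist switches from a slice scanned with `strings.EqualFold` on every lookup to a `map[string]struct{}`, which the comment above motivates with a CPU profile of a config managing 110K names. The same trade-off in miniature, as a standalone sketch not tied to certmagic internals (the lower-casing stands in for certmagic's name normalization):

```go
package main

import (
	"fmt"
	"strings"
)

// sliceContains mirrors the old behaviour: a linear, case-insensitive scan,
// which is O(n) per lookup and O(n^2) when populating a large allowlist.
func sliceContains(allowlist []string, name string) bool {
	for _, n := range allowlist {
		if strings.EqualFold(n, name) {
			return true
		}
	}
	return false
}

// mapContains mirrors the new behaviour: an O(1) lookup on names that were
// normalized (lower-cased) when they were inserted.
func mapContains(allowlist map[string]struct{}, name string) bool {
	_, ok := allowlist[strings.ToLower(name)]
	return ok
}

func main() {
	allowlist := map[string]struct{}{"example.com": {}}
	fmt.Println(mapContains(allowlist, "EXAMPLE.com"))                 // true
	fmt.Println(sliceContains([]string{"example.com"}, "EXAMPLE.com")) // true
}
```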

@ -209,7 +209,10 @@ func New(certCache *Cache, cfg Config) *Config {
if certCache == nil {
panic("a certificate cache is required")
}
if certCache.options.GetConfigForCert == nil {
certCache.optionsMu.RLock()
getConfigForCert := certCache.options.GetConfigForCert
defer certCache.optionsMu.RUnlock()
if getConfigForCert == nil {
panic("cache must have GetConfigForCert set in its options")
}
return newWithCache(certCache, cfg)
@ -278,17 +281,20 @@ func newWithCache(certCache *Cache, cfg Config) *Config {
// ManageSync causes the certificates for domainNames to be managed
// according to cfg. If cfg.OnDemand is not nil, then this simply
// whitelists the domain names and defers the certificate operations
// allowlists the domain names and defers the certificate operations
// to when they are needed. Otherwise, the certificates for each
// name are loaded from storage or obtained from the CA. If loaded
// from storage, they are renewed if they are expiring or expired.
// It then caches the certificate in memory and is prepared to serve
// them up during TLS handshakes.
// name are loaded from storage or obtained from the CA if not already
// in the cache associated with the Config. If loaded from storage,
// they are renewed if they are expiring or expired. It then caches
// the certificate in memory and is prepared to serve them up during
// TLS handshakes. To change how an already-loaded certificate is
// managed, update the cache options relating to getting a config for
// a cert.
//
// Note that name whitelisting for on-demand management only takes
// Note that name allowlisting for on-demand management only takes
// effect if cfg.OnDemand.DecisionFunc is not set (is nil); it will
// not overwrite an existing DecisionFunc, nor will it overwrite
// its decision; i.e. the implicit whitelist is only used if no
// its decision; i.e. the implicit allowlist is only used if no
// DecisionFunc is set.
//
// This method is synchronous, meaning that certificates for all
@ -348,13 +354,14 @@ func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bo
if ctx == nil {
ctx = context.Background()
}
if cfg.OnDemand != nil && cfg.OnDemand.hostAllowlist == nil {
cfg.OnDemand.hostAllowlist = make(map[string]struct{})
}
for _, domainName := range domainNames {
// if on-demand is configured, defer obtain and renew operations
if cfg.OnDemand != nil {
if !cfg.OnDemand.allowlistContains(domainName) {
cfg.OnDemand.hostAllowlist = append(cfg.OnDemand.hostAllowlist, domainName)
}
cfg.OnDemand.hostAllowlist[normalizedName(domainName)] = struct{}{}
continue
}
@ -370,6 +377,14 @@ func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bo
}
func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool) error {
// if certificate is already being managed, nothing to do; maintenance will continue
certs := cfg.certCache.getAllMatchingCerts(domainName)
for _, cert := range certs {
if cert.managed {
return nil
}
}
// first try loading existing certificate from storage
cert, err := cfg.CacheManagedCertificate(ctx, domainName)
if err != nil {
@ -449,28 +464,6 @@ func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool)
return renew()
}
// Unmanage causes the certificates for domainNames to stop being managed.
// If there are certificates for the supplied domain names in the cache, they
// are evicted from the cache.
func (cfg *Config) Unmanage(domainNames []string) {
var deleteQueue []Certificate
for _, domainName := range domainNames {
certs := cfg.certCache.AllMatchingCertificates(domainName)
for _, cert := range certs {
if !cert.managed {
continue
}
deleteQueue = append(deleteQueue, cert)
}
}
cfg.certCache.mu.Lock()
for _, cert := range deleteQueue {
cfg.certCache.removeCertificate(cert)
}
cfg.certCache.mu.Unlock()
}
// ObtainCertSync generates a new private key and obtains a certificate for
// name using cfg in the foreground; i.e. interactively and without retries.
// It stows the renewed certificate and its assets in storage if successful.


@ -22,7 +22,6 @@ import (
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/json"
@ -35,6 +34,7 @@ import (
"strings"
"github.com/klauspost/cpuid/v2"
"github.com/zeebo/blake3"
"go.uber.org/zap"
"golang.org/x/net/idna"
)
@ -271,7 +271,7 @@ func (cfg *Config) loadCertResource(ctx context.Context, issuer Issuer, certName
// which is the chain of DER-encoded bytes. It returns the
// hex encoding of the hash.
func hashCertificateChain(certChain [][]byte) string {
h := sha256.New()
h := blake3.New()
for _, certInChain := range certChain {
h.Write(certInChain)
}


@ -341,7 +341,9 @@ func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.Client
// perfectly full while still being able to load needed certs from storage.
// See https://caddy.community/t/error-tls-alert-internal-error-592-again/13272
// and caddyserver/caddy#4320.
cfg.certCache.optionsMu.RLock()
cacheCapacity := float64(cfg.certCache.options.Capacity)
cfg.certCache.optionsMu.RUnlock()
cacheAlmostFull := cacheCapacity > 0 && float64(cacheSize) >= cacheCapacity*.9
loadDynamically := cfg.OnDemand != nil || cacheAlmostFull
@ -448,8 +450,10 @@ func (cfg *Config) checkIfCertShouldBeObtained(name string, requireOnDemand bool
}
return nil
}
if len(cfg.OnDemand.hostAllowlist) > 0 && !cfg.OnDemand.allowlistContains(name) {
return fmt.Errorf("certificate for '%s' is not managed", name)
if len(cfg.OnDemand.hostAllowlist) > 0 {
if _, ok := cfg.OnDemand.hostAllowlist[name]; !ok {
return fmt.Errorf("certificate for '%s' is not managed", name)
}
}
}
return nil


@ -53,8 +53,10 @@ func (certCache *Cache) maintainAssets(panicCount int) {
}
}()
certCache.optionsMu.RLock()
renewalTicker := time.NewTicker(certCache.options.RenewCheckInterval)
ocspTicker := time.NewTicker(certCache.options.OCSPCheckInterval)
certCache.optionsMu.RUnlock()
log.Info("started background certificate maintenance")


@ -25,18 +25,31 @@ import (
"go.uber.org/zap"
)
// Storage is a type that implements a key-value store.
// Keys are prefix-based, with forward slash '/' as separators
// and without a leading slash.
// Storage is a type that implements a key-value store with
// basic file system (folder path) semantics. Keys use the
// forward slash '/' to separate path components and have no
// leading or trailing slashes.
//
// A "prefix" of a key is defined on a component basis,
// e.g. "a" is a prefix of "a/b" but not "ab/c".
//
// A "file" is a key with a value associated with it.
//
// A "directory" is a key with no value, but which may be
// the prefix of other keys.
//
// Keys passed into Load and Store always have "file" semantics,
// whereas "directories" are only implicit by leading up to the
// file.
//
// The Load, Delete, List, and Stat methods should return
// fs.ErrNotExist if the key does not exist.
//
// Processes running in a cluster should use the same Storage
// value (with the same configuration) in order to share
// certificates and other TLS resources with the cluster.
//
// The Load, Delete, List, and Stat methods should return
// fs.ErrNotExist if the key does not exist.
//
// Implementations of Storage must be safe for concurrent use
// Implementations of Storage MUST be safe for concurrent use
// and honor context cancellations. Methods should block until
// their operation is complete; that is, Load() should always
// return the value from the last call to Store() for a given
@ -46,36 +59,45 @@ import (
// For simplicity, this is not a streaming API and is not
// suitable for very large files.
type Storage interface {
// Locker provides atomic synchronization
// operations, making Storage safe to share.
// The use of Locker is not expected around
// every other method (Store, Load, etc.)
// as those should already be thread-safe;
// Locker is intended for custom jobs or
// transactions that need synchronization.
// Locker enables the storage backend to synchronize
// operational units of work.
//
// The use of Locker is NOT employed around every
// Storage method call (Store, Load, etc), as these
// should already be thread-safe. Locker is used for
// high-level jobs or transactions that need
// synchronization across a cluster; it's a simple
// distributed lock. For example, CertMagic uses the
// Locker interface to coordinate the obtaining of
// certificates.
Locker
// Store puts value at key.
// Store puts value at key. It creates the key if it does
// not exist and overwrites any existing value at this key.
Store(ctx context.Context, key string, value []byte) error
// Load retrieves the value at key.
Load(ctx context.Context, key string) ([]byte, error)
// Delete deletes key. An error should be
// returned only if the key still exists
// Delete deletes the named key. If the name is a
// directory (i.e. prefix of other keys), all keys
// prefixed by this key should be deleted. An error
// should be returned only if the key still exists
// when the method returns.
Delete(ctx context.Context, key string) error
// Exists returns true if the key exists
// Exists returns true if the key exists either as
// a directory (prefix to other keys) or a file,
// and there was no error checking.
Exists(ctx context.Context, key string) bool
// List returns all keys that match prefix.
// List returns all keys in the given path.
//
// If recursive is true, non-terminal keys
// will be enumerated (i.e. "directories"
// should be walked); otherwise, only keys
// prefixed exactly by prefix will be listed.
List(ctx context.Context, prefix string, recursive bool) ([]string, error)
List(ctx context.Context, path string, recursive bool) ([]string, error)
// Stat returns information about key.
Stat(ctx context.Context, key string) (KeyInfo, error)
@ -84,38 +106,46 @@ type Storage interface {
// Locker facilitates synchronization across machines and networks.
// It essentially provides a distributed named-mutex service so
// that multiple consumers can coordinate tasks and share resources.
//
// If possible, a Locker should implement a coordinated distributed
// locking mechanism by generating fencing tokens (see
// https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html).
// This typically requires a central server or consensus algorithm.
// However, if that is not feasible, Lockers may implement an
// alternative mechanism that uses timeouts to detect node or network
// failures and avoid deadlocks. For example, the default FileStorage
// writes a timestamp to the lock file every few seconds, and if another
// node acquiring the lock sees that timestamp is too old, it may
// assume the lock is stale.
//
// As not all Locker implementations use fencing tokens, code relying
// upon Locker must be tolerant of some mis-synchronizations but can
// expect them to be rare.
//
// This interface should only be used for coordinating expensive
// operations across nodes in a cluster; not for internal, extremely
// short-lived, or high-contention locks.
type Locker interface {
// Lock acquires the lock for name, blocking until the lock
// can be obtained or an error is returned. Note that, even
// after acquiring a lock, an idempotent operation may have
// already been performed by another process that acquired
// the lock before - so always check to make sure idempotent
// operations still need to be performed after acquiring the
// lock.
// can be obtained or an error is returned. Only one lock
// for the given name can exist at a time. A call to Lock for
// a name which already exists blocks until the named lock
// is released or becomes stale.
//
// The actual implementation of obtaining of a lock must be
// an atomic operation so that multiple Lock calls at the
// same time always results in only one caller receiving the
// lock at any given time.
// If the named lock represents an idempotent operation, callers
// should always check to make sure the work still needs to be
// completed after acquiring the lock. You never know if another
// process already completed the task while you were waiting to
// acquire it.
//
// To prevent deadlocks, all implementations should put a
// reasonable expiration on the lock in case Unlock is unable
// to be called due to some sort of network failure or system
// crash. Additionally, implementations should honor context
// cancellation as much as possible (in case the caller wishes
// to give up and free resources before the lock can be obtained).
//
// Additionally, implementations may wish to support fencing
// tokens (https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html)
// in order to be robust against long process pauses, extremely
// high network latency (or other factors that get in the way of
// renewing lock leases).
// Implementations should honor context cancellation.
Lock(ctx context.Context, name string) error
// Unlock releases the lock for name. This method must ONLY be
// called after a successful call to Lock, and only after the
// critical section is finished, even if it errored or timed
// out. Unlock cleans up any resources allocated during Lock.
// Unlock releases named lock. This method must ONLY be called
// after a successful call to Lock, and only after the critical
// section is finished, even if it errored or timed out. Unlock
// cleans up any resources allocated during Lock. Unlock should
// only return an error if the lock was unable to be released.
Unlock(ctx context.Context, name string) error
}
@ -130,7 +160,7 @@ type KeyInfo struct {
Key string
Modified time.Time
Size int64
IsTerminal bool // false for keys that only contain other keys (like directories)
IsTerminal bool // false for directories (keys that act as prefix for other keys)
}
// storeTx stores all the values or none at all.

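The rewritten `Storage`/`Locker` documentation stresses two points: locks are for expensive cluster-wide jobs, and because those jobs are idempotent, the caller should re-check whether the work is still needed after acquiring the lock. A sketch of that pattern using the bundled `FileStorage` (which implements both interfaces); the path and keys are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	storage := &certmagic.FileStorage{Path: "/tmp/certmagic-demo"} // placeholder path
	ctx := context.Background()

	// Lock blocks until the named lock is acquired or the context is canceled.
	if err := storage.Lock(ctx, "issue-example.com"); err != nil {
		log.Fatal(err)
	}
	defer storage.Unlock(ctx, "issue-example.com")

	// Idempotent work: another node may already have stored the result while
	// we were waiting, so check before doing the expensive part.
	if !storage.Exists(ctx, "demo/result") {
		if err := storage.Store(ctx, "demo/result", []byte("done")); err != nil {
			log.Fatal(err)
		}
	}
}
```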

@ -8,7 +8,7 @@ This project adheres to the [Contributor Covenant 1.2.](https://www.contributor-
- We do our best to have an [up-to-date documentation](https://casbin.org/docs/overview)
- [Stack Overflow](https://stackoverflow.com) is the best place to start if you have a question. Please use the [casbin tag](https://stackoverflow.com/tags/casbin/info) we are actively monitoring. We encourage you to use Stack Overflow specially for Modeling Access Control Problems, in order to build a shared knowledge base.
- You can also join our [Gitter community](https://gitter.im/casbin/Lobby).
- You can also join our [Discord](https://discord.gg/S5UjpzGZjN).
## Reporting issues


@ -6,7 +6,7 @@ Casbin
[![Coverage Status](https://coveralls.io/repos/github/casbin/casbin/badge.svg?branch=master)](https://coveralls.io/github/casbin/casbin?branch=master)
[![Godoc](https://godoc.org/github.com/casbin/casbin?status.svg)](https://pkg.go.dev/github.com/casbin/casbin/v2)
[![Release](https://img.shields.io/github/release/casbin/casbin.svg)](https://github.com/casbin/casbin/releases/latest)
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/casbin/lobby)
[![Discord](https://img.shields.io/discord/1022748306096537660?logo=discord&label=discord&color=5865F2)](https://discord.gg/S5UjpzGZjN)
[![Sourcegraph](https://sourcegraph.com/github.com/casbin/casbin/-/badge.svg)](https://sourcegraph.com/github.com/casbin/casbin?badge)
💖 [**Looking for an open-source identity and access management solution like Okta, Auth0, Keycloak ? Learn more about: Casdoor**](https://casdoor.org/)


@ -17,11 +17,11 @@ package casbin
import (
"errors"
"fmt"
"regexp"
"runtime/debug"
"strings"
"sync"
"github.com/Knetic/govaluate"
"github.com/casbin/casbin/v2/effector"
"github.com/casbin/casbin/v2/log"
"github.com/casbin/casbin/v2/model"
@ -30,6 +30,9 @@ import (
"github.com/casbin/casbin/v2/rbac"
defaultrolemanager "github.com/casbin/casbin/v2/rbac/default-role-manager"
"github.com/casbin/casbin/v2/util"
"github.com/Knetic/govaluate"
"github.com/tidwall/gjson"
)
// Enforcer is the main interface for authorization enforcement and policy management.
@ -50,6 +53,7 @@ type Enforcer struct {
autoBuildRoleLinks bool
autoNotifyWatcher bool
autoNotifyDispatcher bool
acceptJsonRequest bool
logger log.Logger
}
@ -476,6 +480,11 @@ func (e *Enforcer) EnableAutoBuildRoleLinks(autoBuildRoleLinks bool) {
e.autoBuildRoleLinks = autoBuildRoleLinks
}
// EnableAcceptJsonRequest controls whether to accept json as a request parameter
func (e *Enforcer) EnableAcceptJsonRequest(acceptJsonRequest bool) {
e.acceptJsonRequest = acceptJsonRequest
}
// BuildRoleLinks manually rebuild the role inheritance relations.
func (e *Enforcer) BuildRoleLinks() error {
for _, rm := range e.rmMap {
@ -564,6 +573,10 @@ func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interfac
pTokens[token] = i
}
if e.acceptJsonRequest {
expString = requestJsonReplace(expString, rTokens, rvals)
}
parameters := enforceParameters{
rTokens: rTokens,
rVals: rvals,
@ -609,7 +622,16 @@ func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interfac
pvals)
}
parameters.pVals = pvals
if e.acceptJsonRequest {
pvalsCopy := make([]string, len(pvals))
copy(pvalsCopy, pvals)
for i, pStr := range pvalsCopy {
pvalsCopy[i] = requestJsonReplace(pStr, rTokens, rvals)
}
parameters.pVals = pvalsCopy
} else {
parameters.pVals = pvals
}
result, err := expression.Eval(parameters)
// log.LogPrint("Result: ", result)
@ -646,9 +668,9 @@ func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interfac
policyEffects[policyIndex] = effector.Allow
}
//if e.model["e"]["e"].Value == "priority(p_eft) || deny" {
// if e.model["e"]["e"].Value == "priority(p_eft) || deny" {
// break
//}
// }
effect, explainIndex, err = e.eft.MergeEffects(e.model["e"][eType].Value, policyEffects, matcherResults, policyIndex, policyLen)
if err != nil {
@ -711,6 +733,31 @@ func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interfac
return result, nil
}
var requestObjectRegex = regexp.MustCompile(`r[_.][A-Za-z_0-9]+\.[A-Za-z_0-9.]+[A-Za-z_0-9]`)
var requestObjectRegexPrefix = regexp.MustCompile(`r[_.][A-Za-z_0-9]+\.`)
// requestJsonReplace is used to support request parameters of type json
// It will replace the access of the request object in matchers or policy with the actual value in the request json parameter
// For example: request sub = `{"Owner": "alice", "Age": 30}`
// policy: p, r.sub.Age > 18, /data1, read ==> p, 30 > 18, /data1, read
// matchers: m = r.sub == r.obj.Owner ==> m = r.sub == "alice"
func requestJsonReplace(str string, rTokens map[string]int, rvals []interface{}) string {
matches := requestObjectRegex.FindStringSubmatch(str)
for _, matchesStr := range matches {
prefix := requestObjectRegexPrefix.FindString(matchesStr)
jsonPath := strings.TrimPrefix(matchesStr, prefix)
tokenIndex := rTokens[prefix[:len(prefix)-1]]
if jsonStr, ok := rvals[tokenIndex].(string); ok {
newStr := gjson.Get(jsonStr, jsonPath).String()
if !util.IsNumeric(newStr) {
newStr = `"` + newStr + `"`
}
str = strings.Replace(str, matchesStr, newStr, -1)
}
}
return str
}
func (e *Enforcer) getAndStoreMatcherExpression(hasEval bool, expString string, functions map[string]govaluate.ExpressionFunction) (*govaluate.EvaluableExpression, error) {
var expression *govaluate.EvaluableExpression
var err error

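casbin v2.72.0 adds `EnableAcceptJsonRequest`, which lets matchers and policies dereference fields of a JSON-encoded request value via gjson, as described in the `requestJsonReplace` comment above. A self-contained usage sketch — the model and policy are illustrative, not taken from this repository:

```go
package main

import (
	"fmt"
	"log"

	"github.com/casbin/casbin/v2"
	"github.com/casbin/casbin/v2/model"
)

func main() {
	// Illustrative ACL-style model whose matcher reads a field of the JSON subject.
	m, err := model.NewModelFromString(`
[request_definition]
r = sub, obj, act

[policy_definition]
p = sub, obj, act

[policy_effect]
e = some(where (p.eft == allow))

[matchers]
m = r.sub.Name == p.sub && r.obj == p.obj && r.act == p.act
`)
	if err != nil {
		log.Fatal(err)
	}

	e, err := casbin.NewEnforcer(m)
	if err != nil {
		log.Fatal(err)
	}
	e.EnableAcceptJsonRequest(true)

	e.AddPolicy("alice", "/data1", "read")

	// The subject is passed as JSON; r.sub.Name in the matcher resolves to "alice".
	ok, err := e.Enforce(`{"Name": "alice", "Age": 30}`, "/data1", "read")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok) // true
}
```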

@ -25,6 +25,12 @@ var evalReg = regexp.MustCompile(`\beval\((?P<rule>[^)]*)\)`)
var escapeAssertionRegex = regexp.MustCompile(`\b((r|p)[0-9]*)\.`)
var numericRegex = regexp.MustCompile(`^-?\d+(?:\.\d+)?$`)
func IsNumeric(s string) bool {
return numericRegex.MatchString(s)
}
// EscapeAssertion escapes the dots in the assertion, because the expression evaluation doesn't support such variable names.
func EscapeAssertion(s string) string {
s = escapeAssertionRegex.ReplaceAllStringFunc(s, func(m string) string {


@ -7,15 +7,28 @@ import (
type ClusterNode struct {
ID string `json:"id"`
Name string `json:"name"`
Version string `json:"version"`
Status string `json:"status"`
Error string `json:"error"`
Voter bool `json:"voter"`
Leader bool `json:"leader"`
Address string `json:"address"`
CreatedAt string `json:"created_at"`
Uptime int64 `json:"uptime_seconds"`
LastContact int64 `json:"last_contact"` // unix timestamp
Latency float64 `json:"latency_ms"` // milliseconds
State string `json:"state"`
CreatedAt string `json:"created_at"` // RFC 3339
Uptime int64 `json:"uptime_seconds"` // seconds
LastContact float64 `json:"last_contact_ms"` // milliseconds
Latency float64 `json:"latency_ms"` // milliseconds
Core ClusterNodeCore `json:"core"`
Resources ClusterNodeResources `json:"resources"`
}
type ClusterNodeCore struct {
Address string `json:"address"`
Status string `json:"status"`
Error string `json:"error"`
LastContact float64 `json:"last_contact_ms"` // milliseconds
Latency float64 `json:"latency_ms"` // milliseconds
}
type ClusterNodeResources struct {
IsThrottling bool `json:"is_throttling"`
NCPU float64 `json:"ncpu"`
@ -25,52 +38,30 @@ type ClusterNodeResources struct {
MemLimit uint64 `json:"memory_limit_bytes"` // bytes
}
type ClusterNodeFiles struct {
LastUpdate int64 `json:"last_update"` // unix timestamp
Files map[string][]string `json:"files"`
}
type ClusterRaftServer struct {
ID string `json:"id"`
Address string `json:"address"` // raft address
Voter bool `json:"voter"`
Leader bool `json:"leader"`
}
type ClusterRaftStats struct {
State string `json:"state"`
LastContact float64 `json:"last_contact_ms"`
NumPeers uint64 `json:"num_peers"`
}
type ClusterRaft struct {
Server []ClusterRaftServer `json:"server"`
Stats ClusterRaftStats `json:"stats"`
Address string `json:"address"`
State string `json:"state"`
LastContact float64 `json:"last_contact_ms"` // milliseconds
NumPeers uint64 `json:"num_peers"`
LogTerm uint64 `json:"log_term"`
LogIndex uint64 `json:"log_index"`
}
type ClusterAbout struct {
ID string `json:"id"`
Address string `json:"address"`
ClusterAPIAddress string `json:"cluster_api_address"`
CoreAPIAddress string `json:"core_api_address"`
Raft ClusterRaft `json:"raft"`
Nodes []ClusterNode `json:"nodes"`
Version string `json:"version"`
Degraded bool `json:"degraded"`
DegradedErr string `json:"degraded_error"`
ID string `json:"id"`
Name string `json:"name"`
Leader bool `json:"leader"`
Address string `json:"address"`
Raft ClusterRaft `json:"raft"`
Nodes []ClusterNode `json:"nodes"`
Version string `json:"version"`
Degraded bool `json:"degraded"`
DegradedErr string `json:"degraded_error"`
}
type ClusterProcess struct {
ID string `json:"id"`
Owner string `json:"owner"`
Domain string `json:"domain"`
NodeID string `json:"node_id"`
Reference string `json:"reference"`
Order string `json:"order"`
State string `json:"state"`
CPU float64 `json:"cpu" swaggertype:"number" jsonschema:"type=number"` // percent 0-100*ncpu
Memory uint64 `json:"memory_bytes"` // bytes
Runtime int64 `json:"runtime_seconds"` // seconds
type ClusterNodeFiles struct {
LastUpdate int64 `json:"last_update"` // unix timestamp
Files map[string][]string `json:"files"`
}
type ClusterLock struct {
@ -84,3 +75,5 @@ type ClusterKVSValue struct {
}
type ClusterKVS map[string]ClusterKVSValue
type ClusterProcessMap map[string]string

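The cluster API types move the core connection status into a nested `core` object and report `last_contact` in milliseconds. A decoding sketch against the updated shape, using only fields visible in this diff; the payload values are made up:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/datarhei/core-client-go/v16/api"
)

func main() {
	// Illustrative payload matching the new ClusterNode shape.
	payload := []byte(`{
		"id": "node-1",
		"name": "node-1",
		"version": "16.14.0",
		"status": "online",
		"voter": true,
		"leader": false,
		"address": "10.0.0.1:8000",
		"created_at": "2023-07-17T15:00:00Z",
		"uptime_seconds": 3600,
		"last_contact_ms": 12.5,
		"latency_ms": 0.8,
		"core": {
			"address": "10.0.0.1:8080",
			"status": "online",
			"last_contact_ms": 10.1,
			"latency_ms": 0.6
		},
		"resources": {"is_throttling": false, "ncpu": 4}
	}`)

	var node api.ClusterNode
	if err := json.Unmarshal(payload, &node); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s core=%s last_contact=%.1fms\n", node.Name, node.Core.Status, node.LastContact)
}
```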

@ -7,6 +7,7 @@ type Process struct {
Domain string `json:"domain"`
Type string `json:"type" jsonschema:"enum=ffmpeg"`
Reference string `json:"reference"`
CoreID string `json:"core_id"`
CreatedAt int64 `json:"created_at" jsonschema:"minimum=0" format:"int64"` // Unix timestamp
UpdatedAt int64 `json:"updated_at" jsonschema:"minimum=0" format:"int64"` // Unix timestamp
Config *ProcessConfig `json:"config,omitempty"`


@ -99,10 +99,11 @@ type RestClient interface {
IdentitySetPolicies(name string, p []api.IAMPolicy) error // PUT /v3/iam/user/{name}/policy
IdentityDelete(name string) error // DELETE /v3/iam/user/{name}
Cluster() (api.ClusterAbout, error) // GET /v3/cluster
ClusterHealthy() (bool, error) // GET /v3/cluster/healthy
ClusterSnapshot() (io.ReadCloser, error) // GET /v3/cluster/snapshot
ClusterLeave() error // PUT /v3/cluster/leave
Cluster() (api.ClusterAbout, error) // GET /v3/cluster
ClusterHealthy() (bool, error) // GET /v3/cluster/healthy
ClusterSnapshot() (io.ReadCloser, error) // GET /v3/cluster/snapshot
ClusterLeave() error // PUT /v3/cluster/leave
ClusterTransferLeadership(id string) error // PUT /v3/cluster/transfer/{id}
ClusterNodeList() ([]api.ClusterNode, error) // GET /v3/cluster/node
ClusterNode(id string) (api.ClusterNode, error) // GET /v3/cluster/node/{id}
@ -110,13 +111,14 @@ type RestClient interface {
ClusterNodeProcessList(id string, opts ProcessListOptions) ([]api.Process, error) // GET /v3/cluster/node/{id}/process
ClusterNodeVersion(id string) (api.Version, error) // GET /v3/cluster/node/{id}/version
ClusterDBProcessList() ([]api.Process, error) // GET /v3/cluster/db/process
ClusterDBProcess(id ProcessID) (api.Process, error) // GET /v3/cluster/db/process/{id}
ClusterDBUserList() ([]api.IAMUser, error) // GET /v3/cluster/db/user
ClusterDBUser(name string) (api.IAMUser, error) // GET /v3/cluster/db/user/{name}
ClusterDBPolicies() ([]api.IAMPolicy, error) // GET /v3/cluster/db/policies
ClusterDBLocks() ([]api.ClusterLock, error) // GET /v3/cluster/db/locks
ClusterDBKeyValues() (api.ClusterKVS, error) // GET /v3/cluster/db/kv
ClusterDBProcessList() ([]api.Process, error) // GET /v3/cluster/db/process
ClusterDBProcess(id ProcessID) (api.Process, error) // GET /v3/cluster/db/process/{id}
ClusterDBUserList() ([]api.IAMUser, error) // GET /v3/cluster/db/user
ClusterDBUser(name string) (api.IAMUser, error) // GET /v3/cluster/db/user/{name}
ClusterDBPolicies() ([]api.IAMPolicy, error) // GET /v3/cluster/db/policies
ClusterDBLocks() ([]api.ClusterLock, error) // GET /v3/cluster/db/locks
ClusterDBKeyValues() (api.ClusterKVS, error) // GET /v3/cluster/db/kv
ClusterDBProcessMap() (api.ClusterProcessMap, error) // GET /v3/cluster/db/map/process
ClusterProcessList(opts ProcessListOptions) ([]api.Process, error) // GET /v3/cluster/process
ClusterProcess(id ProcessID, filter []string) (api.Process, error) // POST /v3/cluster/process
@ -415,6 +417,10 @@ func New(config Config) (RestClient, error) {
path: mustNewGlob("/v3/cluster/node/*/version"),
constraint: mustNewConstraint("^16.14.0"),
},
{
path: mustNewGlob("/v3/cluster/db/map/process"),
constraint: mustNewConstraint("^16.14.0"),
},
},
"POST": {
{
@ -471,6 +477,10 @@ func New(config Config) (RestClient, error) {
path: mustNewGlob("/v3/session/token/*"),
constraint: mustNewConstraint("^16.14.0"),
},
{
path: mustNewGlob("/v3/cluster/transfer/*"),
constraint: mustNewConstraint("^16.14.0"),
},
},
"DELETE": {
{

View File

@ -3,6 +3,7 @@ package coreclient
import (
"encoding/json"
"io"
"net/url"
"github.com/datarhei/core-client-go/v16/api"
)
@ -42,3 +43,9 @@ func (r *restclient) ClusterLeave() error {
return err
}
func (r *restclient) ClusterTransferLeadership(id string) error {
_, err := r.call("PUT", "/v3/cluster/transfer/"+url.PathEscape(id), nil, nil, "", nil)
return err
}
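// Illustrative usage sketch, not part of this file: calling the new
// ClusterTransferLeadership method from application code. The Config fields,
// address and node ID below are placeholders/assumptions, not taken from this
// repository.
package main

import (
	"log"

	coreclient "github.com/datarhei/core-client-go/v16"
)

func main() {
	// Address is an assumed Config field; consult coreclient.Config for the exact fields.
	client, err := coreclient.New(coreclient.Config{Address: "http://127.0.0.1:8080"})
	if err != nil {
		log.Fatal(err)
	}

	// PUT /v3/cluster/transfer/{id}: ask the current leader to hand leadership to "node-2".
	if err := client.ClusterTransferLeadership("node-2"); err != nil {
		log.Fatal(err)
	}
}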

View File

@ -100,3 +100,16 @@ func (r *restclient) ClusterDBKeyValues() (api.ClusterKVS, error) {
return kvs, err
}
func (r *restclient) ClusterDBProcessMap() (api.ClusterProcessMap, error) {
var m api.ClusterProcessMap
data, err := r.call("GET", "/v3/cluster/db/map/process", nil, nil, "", nil)
if err != nil {
return m, err
}
err = json.Unmarshal(data, &m)
return m, err
}
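// Illustrative usage sketch, not part of this file: reading the cluster-wide
// process-to-node assignment. api.ClusterProcessMap is a map[string]string, so
// it can be ranged over directly. Assumes the "fmt" import and the coreclient
// package with an already constructed client.
func printProcessMap(client coreclient.RestClient) error {
	m, err := client.ClusterDBProcessMap() // GET /v3/cluster/db/map/process
	if err != nil {
		return err
	}
	for processID, nodeID := range m {
		fmt.Printf("process %s is assigned to node %s\n", processID, nodeID)
	}
	return nil
}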

View File

@ -26,6 +26,7 @@
package jsonpointer
import (
"encoding/json"
"errors"
"fmt"
"reflect"
@ -40,6 +41,7 @@ const (
pointerSeparator = `/`
invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
notFound = `Can't find the pointer in the document`
)
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONPointable interface {
JSONLookup(string) (interface{}, error)
JSONLookup(string) (any, error)
}
// JSONSetable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONSetable interface {
JSONSet(string, interface{}) error
JSONSet(string, any) error
}
// New creates a new json pointer for the given string
@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error {
err = errors.New(invalidStart)
} else {
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
for _, referenceToken := range referenceTokens[1:] {
p.referenceTokens = append(p.referenceTokens, referenceToken)
}
p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
}
}
@ -91,26 +91,26 @@ func (p *Pointer) parse(jsonPointerString string) error {
}
// Get uses the pointer to retrieve a value from a JSON document
func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
return p.get(document, swag.DefaultJSONNameProvider)
}
// Set uses the pointer to set a value from a JSON document
func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
func (p *Pointer) Set(document any, value any) (any, error) {
return document, p.set(document, value, swag.DefaultJSONNameProvider)
}
// GetForToken gets a value for a json pointer token 1 level deep
func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}
// SetForToken gets a value for a json pointer token 1 level deep
func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
@ -159,7 +159,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
}
func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error {
rValue := reflect.Indirect(reflect.ValueOf(node))
if ns, ok := node.(JSONSetable); ok { // pointer impl
@ -210,7 +210,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
}
func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
if nameProvider == nil {
nameProvider = swag.DefaultJSONNameProvider
@ -241,7 +241,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
return node, kind, nil
}
func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
@ -363,6 +363,127 @@ func (p *Pointer) String() string {
return pointerString
}
func (p *Pointer) Offset(document string) (int64, error) {
dec := json.NewDecoder(strings.NewReader(document))
var offset int64
for _, ttk := range p.DecodedTokens() {
tk, err := dec.Token()
if err != nil {
return 0, err
}
switch tk := tk.(type) {
case json.Delim:
switch tk {
case '{':
offset, err = offsetSingleObject(dec, ttk)
if err != nil {
return 0, err
}
case '[':
offset, err = offsetSingleArray(dec, ttk)
if err != nil {
return 0, err
}
default:
return 0, fmt.Errorf("invalid token %#v", tk)
}
default:
return 0, fmt.Errorf("invalid token %#v", tk)
}
}
return offset, nil
}
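// Illustrative sketch only, not part of this package's API surface: Offset
// reports the byte position within the raw document at which the pointed-to
// value starts, which is handy for error messages that reference the original
// JSON text. The document and pointer below are made-up examples.
func exampleOffset() (int64, error) {
	doc := `{"name":{"first":"Tom","last":"Anderson"},"age":37}`

	p, err := New("/name/last")
	if err != nil {
		return 0, err
	}

	// Offset walks the document with a json.Decoder and returns where the
	// "last" member of the "name" object begins.
	return p.Offset(doc)
}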
func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
for dec.More() {
offset := dec.InputOffset()
tk, err := dec.Token()
if err != nil {
return 0, err
}
switch tk := tk.(type) {
case json.Delim:
switch tk {
case '{':
if err := drainSingle(dec); err != nil {
return 0, err
}
case '[':
if err := drainSingle(dec); err != nil {
return 0, err
}
}
case string:
if tk == decodedToken {
return offset, nil
}
default:
return 0, fmt.Errorf("invalid token %#v", tk)
}
}
return 0, fmt.Errorf("token reference %q not found", decodedToken)
}
func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
idx, err := strconv.Atoi(decodedToken)
if err != nil {
return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
}
var i int
for i = 0; i < idx && dec.More(); i++ {
tk, err := dec.Token()
if err != nil {
return 0, err
}
switch tk := tk.(type) {
case json.Delim:
switch tk {
case '{':
if err := drainSingle(dec); err != nil {
return 0, err
}
case '[':
if err := drainSingle(dec); err != nil {
return 0, err
}
}
}
}
if !dec.More() {
return 0, fmt.Errorf("token reference %q not found", decodedToken)
}
return dec.InputOffset(), nil
}
// drainSingle drains a single level of object or array.
// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
func drainSingle(dec *json.Decoder) error {
for dec.More() {
tk, err := dec.Token()
if err != nil {
return err
}
switch tk := tk.(type) {
case json.Delim:
switch tk {
case '{':
if err := drainSingle(dec); err != nil {
return err
}
case '[':
if err := drainSingle(dec); err != nil {
return err
}
}
}
}
// Consumes the ending delim
if _, err := dec.Token(); err != nil {
return err
}
return nil
}
// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /

View File

@ -1,5 +1,38 @@
# Changelog
## v4.11.1 - 2023-07-16
**Fixes**
* Fix `Gzip` middleware not sending response code for no content responses (404, 301/302 redirects etc) [#2481](https://github.com/labstack/echo/pull/2481)
## v4.11.0 - 2023-07-14
**Fixes**
* Fixes the proxy middleware concurrency issue of calling the Next() proxy target on Round Robin Balancer [#2409](https://github.com/labstack/echo/pull/2409)
* Fix `group.RouteNotFound` not working when group has attached middlewares [#2411](https://github.com/labstack/echo/pull/2411)
* Fix global error handler return error message when message is an error [#2456](https://github.com/labstack/echo/pull/2456)
* Do not use global timeNow variables [#2477](https://github.com/labstack/echo/pull/2477)
**Enhancements**
* Added an optional config variable to disable centralized error handler in recovery middleware [#2410](https://github.com/labstack/echo/pull/2410)
* refactor: use `strings.ReplaceAll` directly [#2424](https://github.com/labstack/echo/pull/2424)
* Add support for Go1.20 `http.rwUnwrapper` to Response struct [#2425](https://github.com/labstack/echo/pull/2425)
* Check whether is nil before invoking centralized error handling [#2429](https://github.com/labstack/echo/pull/2429)
* Proper colon support in `echo.Reverse` method [#2416](https://github.com/labstack/echo/pull/2416)
* Fix misuses of a vs an in documentation comments [#2436](https://github.com/labstack/echo/pull/2436)
* Add link to slog.Handler library for Echo logging into README.md [#2444](https://github.com/labstack/echo/pull/2444)
* In proxy middleware, support retries of failed proxy requests [#2414](https://github.com/labstack/echo/pull/2414)
* gofmt fixes to comments [#2452](https://github.com/labstack/echo/pull/2452)
* gzip response only if it exceeds a minimal length [#2267](https://github.com/labstack/echo/pull/2267)
* Upgrade packages [#2475](https://github.com/labstack/echo/pull/2475)
## v4.10.2 - 2023-02-22
**Security**

View File

@ -110,6 +110,7 @@ of middlewares in this list.
| [github.com/swaggo/echo-swagger](https://github.com/swaggo/echo-swagger) | Automatically generate RESTful API documentation with [Swagger](https://swagger.io/) 2.0. |
| [github.com/ziflex/lecho](https://github.com/ziflex/lecho) | [Zerolog](https://github.com/rs/zerolog) logging library wrapper for Echo logger interface. |
| [github.com/brpaz/echozap](https://github.com/brpaz/echozap) | Uber´s [Zap](https://github.com/uber-go/zap) logging library wrapper for Echo logger interface. |
| [github.com/samber/slog-echo](https://github.com/samber/slog-echo) | Go [slog](https://pkg.go.dev/golang.org/x/exp/slog) logging library wrapper for Echo logger interface. |
| [github.com/darkweak/souin/plugins/echo](https://github.com/darkweak/souin/tree/master/plugins/echo) | HTTP cache system based on [Souin](https://github.com/darkweak/souin) to automatically get your endpoints cached. It supports some distributed and non-distributed storage systems depending your needs. |
| [github.com/mikestefanello/pagoda](https://github.com/mikestefanello/pagoda) | Rapid, easy full-stack web development starter kit built with Echo. |
| [github.com/go-woo/protoc-gen-echo](https://github.com/go-woo/protoc-gen-echo) | ProtoBuf generate Echo server side code |

View File

@ -114,7 +114,7 @@ func (b *DefaultBinder) Bind(i interface{}, c Context) (err error) {
// Only bind query parameters for GET/DELETE/HEAD to avoid unexpected behavior with destination struct binding from body.
// For example a request URL `&id=1&lang=en` with body `{"id":100,"lang":"de"}` would lead to precedence issues.
// The HTTP method check restores pre-v4.1.11 behavior to avoid these problems (see issue #1670)
method := c.Request().Method
method := c.Request().Method
if method == http.MethodGet || method == http.MethodDelete || method == http.MethodHead {
if err = b.BindQueryParams(c, i); err != nil {
return err

View File

@ -1236,7 +1236,7 @@ func (b *ValueBinder) durations(sourceParam string, values []string, dest *[]tim
// Example: 1609180603 bind to 2020-12-28T18:36:43.000000000+00:00
//
// Note:
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
func (b *ValueBinder) UnixTime(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, false, time.Second)
}
@ -1247,7 +1247,7 @@ func (b *ValueBinder) UnixTime(sourceParam string, dest *time.Time) *ValueBinder
// Example: 1609180603 bind to 2020-12-28T18:36:43.000000000+00:00
//
// Note:
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
func (b *ValueBinder) MustUnixTime(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, true, time.Second)
}
@ -1257,7 +1257,7 @@ func (b *ValueBinder) MustUnixTime(sourceParam string, dest *time.Time) *ValueBi
// Example: 1647184410140 bind to 2022-03-13T15:13:30.140000000+00:00
//
// Note:
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
func (b *ValueBinder) UnixTimeMilli(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, false, time.Millisecond)
}
@ -1268,7 +1268,7 @@ func (b *ValueBinder) UnixTimeMilli(sourceParam string, dest *time.Time) *ValueB
// Example: 1647184410140 bind to 2022-03-13T15:13:30.140000000+00:00
//
// Note:
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
func (b *ValueBinder) MustUnixTimeMilli(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, true, time.Millisecond)
}
@ -1280,8 +1280,8 @@ func (b *ValueBinder) MustUnixTimeMilli(sourceParam string, dest *time.Time) *Va
// Example: 999999999 binds to 1970-01-01T00:00:00.999999999+00:00
//
// Note:
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
// * Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
// - Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
func (b *ValueBinder) UnixTimeNano(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, false, time.Nanosecond)
}
@ -1294,8 +1294,8 @@ func (b *ValueBinder) UnixTimeNano(sourceParam string, dest *time.Time) *ValueBi
// Example: 999999999 binds to 1970-01-01T00:00:00.999999999+00:00
//
// Note:
// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
// * Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
// - Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
func (b *ValueBinder) MustUnixTimeNano(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, true, time.Nanosecond)
}

View File

@ -100,8 +100,8 @@ type (
// Set saves data in the context.
Set(key string, val interface{})
// Bind binds the request body into provided type `i`. The default binder
// does it based on Content-Type header.
// Bind binds path params, query params and the request body into provided type `i`. The default binder
// binds body based on Content-Type header.
Bind(i interface{}) error
// Validate validates provided `i`. It is usually called after `Context#Bind()`.

View File

@ -39,6 +39,7 @@ package echo
import (
stdContext "context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
@ -258,7 +259,7 @@ const (
const (
// Version of Echo
Version = "4.10.2"
Version = "4.11.1"
website = "https://echo.labstack.com"
// http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo
banner = `
@ -438,12 +439,18 @@ func (e *Echo) DefaultHTTPErrorHandler(err error, c Context) {
// Issue #1426
code := he.Code
message := he.Message
if m, ok := he.Message.(string); ok {
switch m := he.Message.(type) {
case string:
if e.Debug {
message = Map{"message": m, "error": err.Error()}
} else {
message = Map{"message": m}
}
case json.Marshaler:
// do nothing - this type knows how to format itself to JSON
case error:
message = Map{"message": m.Error()}
}
// Send response
@ -614,7 +621,7 @@ func (e *Echo) URL(h HandlerFunc, params ...interface{}) string {
return e.URI(h, params...)
}
// Reverse generates an URL from route name and provided parameters.
// Reverse generates a URL from route name and provided parameters.
func (e *Echo) Reverse(name string, params ...interface{}) string {
return e.router.Reverse(name, params...)
}

View File

@ -23,10 +23,12 @@ func (g *Group) Use(middleware ...MiddlewareFunc) {
if len(g.middleware) == 0 {
return
}
// Allow all requests to reach the group as they might get dropped if router
// doesn't find a match, making none of the group middleware process.
g.Any("", NotFoundHandler)
g.Any("/*", NotFoundHandler)
// group level middlewares are different from Echo `Pre` and `Use` middlewares (those are global). Group level middlewares
// are only executed if they are added to the Router with route.
// So we register catch all route (404 is a safe way to emulate route match) for this group and now during routing the
// Router would find route to match our request path and therefore guarantee the middleware(s) will get executed.
g.RouteNotFound("", NotFoundHandler)
g.RouteNotFound("/*", NotFoundHandler)
}
// CONNECT implements `Echo#CONNECT()` for sub-routes within the Group.

View File

@ -2,9 +2,9 @@ package middleware
import (
"encoding/base64"
"net/http"
"strconv"
"strings"
"net/http"
"github.com/labstack/echo/v4"
)

View File

@ -2,6 +2,7 @@ package middleware
import (
"bufio"
"bytes"
"compress/gzip"
"io"
"net"
@ -21,12 +22,30 @@ type (
// Gzip compression level.
// Optional. Default value -1.
Level int `yaml:"level"`
// Length threshold before gzip compression is applied.
// Optional. Default value 0.
//
// Most of the time you will not need to change the default. Compressing
// a short response might increase the transmitted data because of the
// gzip format overhead. Compressing the response will also consume CPU
// and time on the server and the client (for decompressing). Depending on
// your use case such a threshold might be useful.
//
// See also:
// https://webmasters.stackexchange.com/questions/31750/what-is-recommended-minimum-object-size-for-gzip-performance-benefits
MinLength int
}
gzipResponseWriter struct {
io.Writer
http.ResponseWriter
wroteBody bool
wroteHeader bool
wroteBody bool
minLength int
minLengthExceeded bool
buffer *bytes.Buffer
code int
}
)
@ -37,8 +56,9 @@ const (
var (
// DefaultGzipConfig is the default Gzip middleware config.
DefaultGzipConfig = GzipConfig{
Skipper: DefaultSkipper,
Level: -1,
Skipper: DefaultSkipper,
Level: -1,
MinLength: 0,
}
)
@ -58,8 +78,12 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
if config.Level == 0 {
config.Level = DefaultGzipConfig.Level
}
if config.MinLength < 0 {
config.MinLength = DefaultGzipConfig.MinLength
}
pool := gzipCompressPool(config)
bpool := bufferPool()
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
@ -70,7 +94,6 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
res := c.Response()
res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
if strings.Contains(c.Request().Header.Get(echo.HeaderAcceptEncoding), gzipScheme) {
res.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
i := pool.Get()
w, ok := i.(*gzip.Writer)
if !ok {
@ -78,19 +101,38 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
}
rw := res.Writer
w.Reset(rw)
grw := &gzipResponseWriter{Writer: w, ResponseWriter: rw}
buf := bpool.Get().(*bytes.Buffer)
buf.Reset()
grw := &gzipResponseWriter{Writer: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf}
defer func() {
// There are different reasons for cases when we have not yet written response to the client and now need to do so.
// a) handler response had only response code and no response body (ala 404 or redirects etc). Response code needs to be written now.
// b) body is shorter than our minimum length threshold and being buffered currently and needs to be written
if !grw.wroteBody {
if res.Header().Get(echo.HeaderContentEncoding) == gzipScheme {
res.Header().Del(echo.HeaderContentEncoding)
}
if grw.wroteHeader {
rw.WriteHeader(grw.code)
}
// We have to reset response to its pristine state when
// nothing is written to body or error is returned.
// See issue #424, #407.
res.Writer = rw
w.Reset(io.Discard)
} else if !grw.minLengthExceeded {
// Write uncompressed response
res.Writer = rw
if grw.wroteHeader {
grw.ResponseWriter.WriteHeader(grw.code)
}
grw.buffer.WriteTo(rw)
w.Reset(io.Discard)
}
w.Close()
bpool.Put(buf)
pool.Put(w)
}()
res.Writer = grw
@ -102,7 +144,11 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
func (w *gzipResponseWriter) WriteHeader(code int) {
w.Header().Del(echo.HeaderContentLength) // Issue #444
w.ResponseWriter.WriteHeader(code)
w.wroteHeader = true
// Delay writing of the header until we know if we'll actually compress the response
w.code = code
}
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
@ -110,10 +156,40 @@ func (w *gzipResponseWriter) Write(b []byte) (int, error) {
w.Header().Set(echo.HeaderContentType, http.DetectContentType(b))
}
w.wroteBody = true
if !w.minLengthExceeded {
n, err := w.buffer.Write(b)
if w.buffer.Len() >= w.minLength {
w.minLengthExceeded = true
// The minimum length is exceeded, add Content-Encoding header and write the header
w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
if w.wroteHeader {
w.ResponseWriter.WriteHeader(w.code)
}
return w.Writer.Write(w.buffer.Bytes())
}
return n, err
}
return w.Writer.Write(b)
}
func (w *gzipResponseWriter) Flush() {
if !w.minLengthExceeded {
// Enforce compression because we will not know how much more data will come
w.minLengthExceeded = true
w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
if w.wroteHeader {
w.ResponseWriter.WriteHeader(w.code)
}
w.Writer.Write(w.buffer.Bytes())
}
w.Writer.(*gzip.Writer).Flush()
if flusher, ok := w.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
@ -142,3 +218,12 @@ func gzipCompressPool(config GzipConfig) sync.Pool {
},
}
}
func bufferPool() sync.Pool {
return sync.Pool{
New: func() interface{} {
b := &bytes.Buffer{}
return b
},
}
}
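// Illustrative usage sketch, not part of this file: enabling the new MinLength
// option so that short responses are sent uncompressed. Port, route and sizes
// are hypothetical.
package main

import (
	"net/http"

	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"
)

func main() {
	e := echo.New()
	// Responses shorter than 1 KiB stay uncompressed; longer ones get
	// Content-Encoding: gzip and are compressed with level 5.
	e.Use(middleware.GzipWithConfig(middleware.GzipConfig{
		Level:     5,
		MinLength: 1024,
	}))
	e.GET("/ping", func(c echo.Context) error {
		return c.String(http.StatusOK, "pong") // short body, sent as-is
	})
	e.Logger.Fatal(e.Start(":8080"))
}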

View File

@ -150,8 +150,8 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
allowOriginPatterns := []string{}
for _, origin := range config.AllowOrigins {
pattern := regexp.QuoteMeta(origin)
pattern = strings.Replace(pattern, "\\*", ".*", -1)
pattern = strings.Replace(pattern, "\\?", ".", -1)
pattern = strings.ReplaceAll(pattern, "\\*", ".*")
pattern = strings.ReplaceAll(pattern, "\\?", ".")
pattern = "^" + pattern + "$"
allowOriginPatterns = append(allowOriginPatterns, pattern)
}

View File

@ -20,7 +20,7 @@ type (
}
)
//GZIPEncoding content-encoding header if set to "gzip", decompress body contents.
// GZIPEncoding content-encoding header if set to "gzip", decompress body contents.
const GZIPEncoding string = "gzip"
// Decompressor is used to get the sync.Pool used by the middleware to get Gzip readers
@ -44,12 +44,12 @@ func (d *DefaultGzipDecompressPool) gzipDecompressPool() sync.Pool {
return sync.Pool{New: func() interface{} { return new(gzip.Reader) }}
}
//Decompress decompresses request body based if content encoding type is set to "gzip" with default config
// Decompress decompresses request body based if content encoding type is set to "gzip" with default config
func Decompress() echo.MiddlewareFunc {
return DecompressWithConfig(DefaultDecompressConfig)
}
//DecompressWithConfig decompresses request body based if content encoding type is set to "gzip" with config
// DecompressWithConfig decompresses request body based if content encoding type is set to "gzip" with config
func DecompressWithConfig(config DecompressConfig) echo.MiddlewareFunc {
// Defaults
if config.Skipper == nil {

View File

@ -38,9 +38,9 @@ func rewriteRulesRegex(rewrite map[string]string) map[*regexp.Regexp]string {
rulesRegex := map[*regexp.Regexp]string{}
for k, v := range rewrite {
k = regexp.QuoteMeta(k)
k = strings.Replace(k, `\*`, "(.*?)", -1)
k = strings.ReplaceAll(k, `\*`, "(.*?)")
if strings.HasPrefix(k, `\^`) {
k = strings.Replace(k, `\^`, "^", -1)
k = strings.ReplaceAll(k, `\^`, "^")
}
k = k + "$"
rulesRegex[regexp.MustCompile(k)] = v

View File

@ -12,7 +12,6 @@ import (
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/labstack/echo/v4"
@ -30,6 +29,33 @@ type (
// Required.
Balancer ProxyBalancer
// RetryCount defines the number of times a failed proxied request should be retried
// using the next available ProxyTarget. Defaults to 0, meaning requests are never retried.
RetryCount int
// RetryFilter defines a function used to determine if a failed request to a
// ProxyTarget should be retried. The RetryFilter will only be called when the number
// of previous retries is less than RetryCount. If the function returns true, the
// request will be retried. The provided error indicates the reason for the request
// failure. When the ProxyTarget is unavailable, the error will be an instance of
// echo.HTTPError with a Code of http.StatusBadGateway. In all other cases, the error
// will indicate an internal error in the Proxy middleware. When a RetryFilter is not
// specified, all requests that fail with http.StatusBadGateway will be retried. A custom
// RetryFilter can be provided to only retry specific requests. Note that RetryFilter is
// only called when the request to the target fails, or an internal error in the Proxy
// middleware has occurred. Successful requests that return a non-200 response code cannot
// be retried.
RetryFilter func(c echo.Context, e error) bool
// ErrorHandler defines a function which can be used to return custom errors from
// the Proxy middleware. ErrorHandler is only invoked when there has been
// either an internal error in the Proxy middleware or the ProxyTarget is
// unavailable. Due to the way requests are proxied, ErrorHandler is not invoked
// when a ProxyTarget returns a non-200 response. In these cases, the response
// is already written so errors cannot be modified. ErrorHandler is only
// invoked after all retry attempts have been exhausted.
ErrorHandler func(c echo.Context, err error) error
// Rewrite defines URL path rewrite rules. The values captured in asterisk can be
// retrieved by index e.g. $1, $2 and so on.
// Examples:
@ -72,26 +98,28 @@ type (
Next(echo.Context) *ProxyTarget
}
// TargetProvider defines an interface that gives the opportunity for balancer to return custom errors when selecting target.
// TargetProvider defines an interface that gives the opportunity for balancer
// to return custom errors when selecting target.
TargetProvider interface {
NextTarget(echo.Context) (*ProxyTarget, error)
}
commonBalancer struct {
targets []*ProxyTarget
mutex sync.RWMutex
mutex sync.Mutex
}
// RandomBalancer implements a random load balancing technique.
randomBalancer struct {
*commonBalancer
commonBalancer
random *rand.Rand
}
// RoundRobinBalancer implements a round-robin load balancing technique.
roundRobinBalancer struct {
*commonBalancer
i uint32
commonBalancer
// tracking the index on `targets` slice for the next `*ProxyTarget` to be used
i int
}
)
@ -107,14 +135,14 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
in, _, err := c.Response().Hijack()
if err != nil {
c.Set("_error", fmt.Sprintf("proxy raw, hijack error=%v, url=%s", t.URL, err))
c.Set("_error", fmt.Errorf("proxy raw, hijack error=%w, url=%s", err, t.URL))
return
}
defer in.Close()
out, err := net.Dial("tcp", t.URL.Host)
if err != nil {
c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, dial error=%v, url=%s", t.URL, err)))
c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, dial error=%v, url=%s", err, t.URL)))
return
}
defer out.Close()
@ -122,7 +150,7 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler {
// Write header
err = r.Write(out)
if err != nil {
c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, request header copy error=%v, url=%s", t.URL, err)))
c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, request header copy error=%v, url=%s", err, t.URL)))
return
}
@ -136,39 +164,44 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler {
go cp(in, out)
err = <-errCh
if err != nil && err != io.EOF {
c.Set("_error", fmt.Errorf("proxy raw, copy body error=%v, url=%s", t.URL, err))
c.Set("_error", fmt.Errorf("proxy raw, copy body error=%w, url=%s", err, t.URL))
}
})
}
// NewRandomBalancer returns a random proxy balancer.
func NewRandomBalancer(targets []*ProxyTarget) ProxyBalancer {
b := &randomBalancer{commonBalancer: new(commonBalancer)}
b := randomBalancer{}
b.targets = targets
return b
b.random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
return &b
}
// NewRoundRobinBalancer returns a round-robin proxy balancer.
func NewRoundRobinBalancer(targets []*ProxyTarget) ProxyBalancer {
b := &roundRobinBalancer{commonBalancer: new(commonBalancer)}
b := roundRobinBalancer{}
b.targets = targets
return b
return &b
}
// AddTarget adds an upstream target to the list.
// AddTarget adds an upstream target to the list and returns `true`.
//
// However, if a target with the same name already exists then the operation is aborted returning `false`.
func (b *commonBalancer) AddTarget(target *ProxyTarget) bool {
b.mutex.Lock()
defer b.mutex.Unlock()
for _, t := range b.targets {
if t.Name == target.Name {
return false
}
}
b.mutex.Lock()
defer b.mutex.Unlock()
b.targets = append(b.targets, target)
return true
}
// RemoveTarget removes an upstream target from the list.
// RemoveTarget removes an upstream target from the list by name.
//
// Returns `true` on success, `false` if no target with the name is found.
func (b *commonBalancer) RemoveTarget(name string) bool {
b.mutex.Lock()
defer b.mutex.Unlock()
@ -182,21 +215,58 @@ func (b *commonBalancer) RemoveTarget(name string) bool {
}
// Next randomly returns an upstream target.
//
// Note: `nil` is returned in case upstream target list is empty.
func (b *randomBalancer) Next(c echo.Context) *ProxyTarget {
if b.random == nil {
b.random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
b.mutex.Lock()
defer b.mutex.Unlock()
if len(b.targets) == 0 {
return nil
} else if len(b.targets) == 1 {
return b.targets[0]
}
b.mutex.RLock()
defer b.mutex.RUnlock()
return b.targets[b.random.Intn(len(b.targets))]
}
// Next returns an upstream target using round-robin technique.
// Next returns an upstream target using round-robin technique. In the case
// where a previously failed request is being retried, the round-robin
// balancer will attempt to use the next target relative to the original
// request. If the list of targets held by the balancer is modified while a
// failed request is being retried, it is possible that the balancer will
// return the original failed target.
//
// Note: `nil` is returned in case upstream target list is empty.
func (b *roundRobinBalancer) Next(c echo.Context) *ProxyTarget {
b.i = b.i % uint32(len(b.targets))
t := b.targets[b.i]
atomic.AddUint32(&b.i, 1)
return t
b.mutex.Lock()
defer b.mutex.Unlock()
if len(b.targets) == 0 {
return nil
} else if len(b.targets) == 1 {
return b.targets[0]
}
var i int
const lastIdxKey = "_round_robin_last_index"
// This request is a retry, start from the index of the previous
// target to ensure we don't attempt to retry the request with
// the same failed target
if c.Get(lastIdxKey) != nil {
i = c.Get(lastIdxKey).(int)
i++
if i >= len(b.targets) {
i = 0
}
} else {
// This is a first time request, use the global index
if b.i >= len(b.targets) {
b.i = 0
}
i = b.i
b.i++
}
c.Set(lastIdxKey, i)
return b.targets[i]
}
// Proxy returns a Proxy middleware.
@ -211,14 +281,26 @@ func Proxy(balancer ProxyBalancer) echo.MiddlewareFunc {
// ProxyWithConfig returns a Proxy middleware with config.
// See: `Proxy()`
func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
if config.Balancer == nil {
panic("echo: proxy middleware requires balancer")
}
// Defaults
if config.Skipper == nil {
config.Skipper = DefaultProxyConfig.Skipper
}
if config.Balancer == nil {
panic("echo: proxy middleware requires balancer")
if config.RetryFilter == nil {
config.RetryFilter = func(c echo.Context, e error) bool {
if httpErr, ok := e.(*echo.HTTPError); ok {
return httpErr.Code == http.StatusBadGateway
}
return false
}
}
if config.ErrorHandler == nil {
config.ErrorHandler = func(c echo.Context, err error) error {
return err
}
}
if config.Rewrite != nil {
if config.RegexRewrite == nil {
config.RegexRewrite = make(map[*regexp.Regexp]string)
@ -229,28 +311,17 @@ func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
}
provider, isTargetProvider := config.Balancer.(TargetProvider)
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) (err error) {
return func(c echo.Context) error {
if config.Skipper(c) {
return next(c)
}
req := c.Request()
res := c.Response()
var tgt *ProxyTarget
if isTargetProvider {
tgt, err = provider.NextTarget(c)
if err != nil {
return err
}
} else {
tgt = config.Balancer.Next(c)
}
c.Set(config.ContextKey, tgt)
if err := rewriteURL(config.RegexRewrite, req); err != nil {
return err
return config.ErrorHandler(c, err)
}
// Fix header
@ -266,19 +337,49 @@ func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
req.Header.Set(echo.HeaderXForwardedFor, c.RealIP())
}
// Proxy
switch {
case c.IsWebSocket():
proxyRaw(tgt, c).ServeHTTP(res, req)
case req.Header.Get(echo.HeaderAccept) == "text/event-stream":
default:
proxyHTTP(tgt, c, config).ServeHTTP(res, req)
}
if e, ok := c.Get("_error").(error); ok {
err = e
}
retries := config.RetryCount
for {
var tgt *ProxyTarget
var err error
if isTargetProvider {
tgt, err = provider.NextTarget(c)
if err != nil {
return config.ErrorHandler(c, err)
}
} else {
tgt = config.Balancer.Next(c)
}
return
c.Set(config.ContextKey, tgt)
//If retrying a failed request, clear any previous errors from
//context here so that balancers have the option to check for
//errors that occurred using previous target
if retries < config.RetryCount {
c.Set("_error", nil)
}
// Proxy
switch {
case c.IsWebSocket():
proxyRaw(tgt, c).ServeHTTP(res, req)
case req.Header.Get(echo.HeaderAccept) == "text/event-stream":
default:
proxyHTTP(tgt, c, config).ServeHTTP(res, req)
}
err, hasError := c.Get("_error").(error)
if !hasError {
return nil
}
retry := retries > 0 && config.RetryFilter(c, err)
if !retry {
return config.ErrorHandler(c, err)
}
retries--
}
}
}
}
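// Illustrative usage sketch, not part of this file: a reverse proxy using the
// new retry options. The upstream addresses are placeholders.
package main

import (
	"net/http"
	"net/url"

	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"
)

func main() {
	u1, _ := url.Parse("http://127.0.0.1:8081")
	u2, _ := url.Parse("http://127.0.0.1:8082")
	targets := []*middleware.ProxyTarget{
		{Name: "upstream-a", URL: u1},
		{Name: "upstream-b", URL: u2},
	}

	e := echo.New()
	e.Use(middleware.ProxyWithConfig(middleware.ProxyConfig{
		Balancer:   middleware.NewRoundRobinBalancer(targets),
		RetryCount: 1, // retry a failed request once, against the next target
		// Retry only when the target was unreachable (the middleware reports
		// that as an *echo.HTTPError with status 502).
		RetryFilter: func(c echo.Context, err error) bool {
			he, ok := err.(*echo.HTTPError)
			return ok && he.Code == http.StatusBadGateway
		},
	}))
	e.Logger.Fatal(e.Start(":8080"))
}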

View File

@ -160,6 +160,8 @@ type (
burst int
expiresIn time.Duration
lastCleanup time.Time
timeNow func() time.Time
}
// Visitor signifies a unique user's limiter details
Visitor struct {
@ -219,7 +221,8 @@ func NewRateLimiterMemoryStoreWithConfig(config RateLimiterMemoryStoreConfig) (s
store.burst = int(config.Rate)
}
store.visitors = make(map[string]*Visitor)
store.lastCleanup = now()
store.timeNow = time.Now
store.lastCleanup = store.timeNow()
return
}
@ -244,12 +247,13 @@ func (store *RateLimiterMemoryStore) Allow(identifier string) (bool, error) {
limiter.Limiter = rate.NewLimiter(store.rate, store.burst)
store.visitors[identifier] = limiter
}
limiter.lastSeen = now()
if now().Sub(store.lastCleanup) > store.expiresIn {
now := store.timeNow()
limiter.lastSeen = now
if now.Sub(store.lastCleanup) > store.expiresIn {
store.cleanupStaleVisitors()
}
store.mutex.Unlock()
return limiter.AllowN(now(), 1), nil
return limiter.AllowN(store.timeNow(), 1), nil
}
/*
@ -258,14 +262,9 @@ of users who haven't visited again after the configured expiry time has elapsed
*/
func (store *RateLimiterMemoryStore) cleanupStaleVisitors() {
for id, visitor := range store.visitors {
if now().Sub(visitor.lastSeen) > store.expiresIn {
if store.timeNow().Sub(visitor.lastSeen) > store.expiresIn {
delete(store.visitors, id)
}
}
store.lastCleanup = now()
store.lastCleanup = store.timeNow()
}
/*
actual time method which is mocked in test file
*/
var now = time.Now

View File

@ -37,19 +37,26 @@ type (
// LogErrorFunc defines a function for custom logging in the middleware.
// If it's set you don't need to provide LogLevel for config.
// If this function returns nil, the centralized HTTPErrorHandler will not be called.
LogErrorFunc LogErrorFunc
// DisableErrorHandler disables the call to centralized HTTPErrorHandler.
// The recovered error is then passed back to upstream middleware, instead of swallowing the error.
// Optional. Default value false.
DisableErrorHandler bool `yaml:"disable_error_handler"`
}
)
var (
// DefaultRecoverConfig is the default Recover middleware config.
DefaultRecoverConfig = RecoverConfig{
Skipper: DefaultSkipper,
StackSize: 4 << 10, // 4 KB
DisableStackAll: false,
DisablePrintStack: false,
LogLevel: 0,
LogErrorFunc: nil,
Skipper: DefaultSkipper,
StackSize: 4 << 10, // 4 KB
DisableStackAll: false,
DisablePrintStack: false,
LogLevel: 0,
LogErrorFunc: nil,
DisableErrorHandler: false,
}
)
@ -71,7 +78,7 @@ func RecoverWithConfig(config RecoverConfig) echo.MiddlewareFunc {
}
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
return func(c echo.Context) (returnErr error) {
if config.Skipper(c) {
return next(c)
}
@ -113,7 +120,12 @@ func RecoverWithConfig(config RecoverConfig) echo.MiddlewareFunc {
c.Logger().Print(msg)
}
}
c.Error(err)
if err != nil && !config.DisableErrorHandler {
c.Error(err)
} else {
returnErr = err
}
}
}()
return next(c)
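// Illustrative usage sketch, not part of this file: with DisableErrorHandler
// set, the recovered panic is returned to upstream middleware as an error
// instead of being passed to the centralized HTTPErrorHandler. The handler and
// port are hypothetical.
package main

import (
	"fmt"
	"net/http"

	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"
)

func main() {
	e := echo.New()
	// Outer middleware receives the error produced by the Recover middleware.
	e.Use(func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			if err := next(c); err != nil {
				return c.String(http.StatusInternalServerError, fmt.Sprintf("recovered: %v", err))
			}
			return nil
		}
	})
	e.Use(middleware.RecoverWithConfig(middleware.RecoverConfig{
		DisableErrorHandler: true,
	}))
	e.GET("/panic", func(c echo.Context) error { panic("boom") })
	e.Logger.Fatal(e.Start(":8080"))
}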

View File

@ -225,7 +225,7 @@ func (config RequestLoggerConfig) ToMiddleware() (echo.MiddlewareFunc, error) {
if config.Skipper == nil {
config.Skipper = DefaultSkipper
}
now = time.Now
now := time.Now
if config.timeNow != nil {
now = config.timeNow
}
@ -257,7 +257,7 @@ func (config RequestLoggerConfig) ToMiddleware() (echo.MiddlewareFunc, error) {
config.BeforeNextFunc(c)
}
err := next(c)
if config.HandleError {
if err != nil && config.HandleError {
c.Error(err)
}

View File

@ -94,6 +94,13 @@ func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return r.Writer.(http.Hijacker).Hijack()
}
// Unwrap returns the original http.ResponseWriter.
// ResponseController can be used to access the original http.ResponseWriter.
// See [https://go.dev/blog/go1.20]
func (r *Response) Unwrap() http.ResponseWriter {
return r.Writer
}
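// Illustrative sketch only, not part of this file: because Response implements
// Unwrap, Go 1.20's http.ResponseController can reach the underlying writer,
// for example to set a per-request write deadline from a handler. Assumes the
// "net/http" and "time" imports.
func exampleDeadlineHandler(c Context) error {
	rc := http.NewResponseController(c.Response())
	// SetWriteDeadline is forwarded to the original http.ResponseWriter via Unwrap.
	if err := rc.SetWriteDeadline(time.Now().Add(10 * time.Second)); err != nil {
		return err
	}
	return c.String(http.StatusOK, "ok")
}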
func (r *Response) reset(w http.ResponseWriter) {
r.beforeFuncs = nil
r.afterFuncs = nil

View File

@ -151,7 +151,7 @@ func (r *Router) Routes() []*Route {
return routes
}
// Reverse generates an URL from route name and provided parameters.
// Reverse generates a URL from route name and provided parameters.
func (r *Router) Reverse(name string, params ...interface{}) string {
uri := new(bytes.Buffer)
ln := len(params)
@ -159,7 +159,12 @@ func (r *Router) Reverse(name string, params ...interface{}) string {
for _, route := range r.routes {
if route.Name == name {
for i, l := 0, len(route.Path); i < l; i++ {
if (route.Path[i] == ':' || route.Path[i] == '*') && n < ln {
hasBackslash := route.Path[i] == '\\'
if hasBackslash && i+1 < l && route.Path[i+1] == ':' {
i++ // backslash before colon escapes that colon. in that case skip backslash
}
if n < ln && (route.Path[i] == '*' || (!hasBackslash && route.Path[i] == ':')) {
// in case of `*` wildcard or `:` (unescaped colon) param we replace everything till next slash or end of path
for ; i < l && route.Path[i] != '/'; i++ {
}
uri.WriteString(fmt.Sprintf("%v", params[n]))

View File

@ -220,6 +220,11 @@ type ObjectInfo struct {
ChecksumSHA1 string
ChecksumSHA256 string
Internal *struct {
K int // Data blocks
M int // Parity blocks
} `xml:"Internal"`
// Error
Err error `json:"-"`
}

View File

@ -97,7 +97,15 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List
// Initiate list objects goroutine here.
go func(objectStatCh chan<- ObjectInfo) {
defer close(objectStatCh)
defer func() {
if contextCanceled(ctx) {
objectStatCh <- ObjectInfo{
Err: ctx.Err(),
}
}
close(objectStatCh)
}()
// Save continuationToken for next request.
var continuationToken string
for {
@ -304,7 +312,14 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb
// Initiate list objects goroutine here.
go func(objectStatCh chan<- ObjectInfo) {
defer close(objectStatCh)
defer func() {
if contextCanceled(ctx) {
objectStatCh <- ObjectInfo{
Err: ctx.Err(),
}
}
close(objectStatCh)
}()
marker := opts.StartAfter
for {
@ -321,6 +336,7 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb
for _, object := range result.Contents {
// Save the marker.
marker = object.Key
object.ETag = trimEtag(object.ETag)
select {
// Send object content.
case objectStatCh <- object:
@ -393,7 +409,14 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
// Initiate list objects goroutine here.
go func(resultCh chan<- ObjectInfo) {
defer close(resultCh)
defer func() {
if contextCanceled(ctx) {
resultCh <- ObjectInfo{
Err: ctx.Err(),
}
}
close(resultCh)
}()
var (
keyMarker = ""
@ -424,6 +447,7 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
IsDeleteMarker: version.isDeleteMarker,
UserTags: version.UserTags,
UserMetadata: version.UserMetadata,
Internal: version.Internal,
}
select {
// Send object version info.
@ -698,6 +722,10 @@ func (o *ListObjectsOptions) Set(key, value string) {
// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
// fmt.Println(object)
// }
//
// If the caller cancels the context, the last entry on the 'chan ObjectInfo' will be the context.Error().
// The caller must drain the channel entirely and wait until the channel is closed before proceeding;
// without draining it completely you might leak goroutines.
func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
if opts.WithVersions {
return c.listObjectVersions(ctx, bucketName, opts)
@ -738,6 +766,16 @@ func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPr
return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
}
// contextCanceled returns whether a context is canceled.
func contextCanceled(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
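// Illustrative sketch only, not part of this file: listing objects with a
// cancellable context. When the context is canceled, the final channel entry
// carries ctx.Err(), and the channel must be drained to completion so the
// listing goroutine can exit. Bucket and prefix are placeholders; assumes the
// "context", "fmt" and "time" imports.
func exampleListWithCancel(client *Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	for object := range client.ListObjects(ctx, "mybucket", ListObjectsOptions{Prefix: "logs/", Recursive: true}) {
		if object.Err != nil {
			// object.Err is ctx.Err() if the listing was canceled or timed out.
			return object.Err
		}
		fmt.Println(object.Key)
	}
	return nil
}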
// listIncompleteUploads lists all incomplete uploads.
func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
// Allocate channel for multipart uploads.
@ -765,7 +803,15 @@ func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPr
return objectMultipartStatCh
}
go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
defer close(objectMultipartStatCh)
defer func() {
if contextCanceled(ctx) {
objectMultipartStatCh <- ObjectMultipartInfo{
Err: ctx.Err(),
}
}
close(objectMultipartStatCh)
}()
// object and upload ID marker for future requests.
var objectMarker string
var uploadIDMarker string

View File

@ -93,6 +93,11 @@ type Version struct {
// Only returned by MinIO servers.
UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
Internal *struct {
K int // Data blocks
M int // Parity blocks
} `xml:"Internal"`
isDeleteMarker bool
}

View File

@ -124,7 +124,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.59"
libraryVersion = "v7.0.60"
)
// User Agent should always follow the below style.

View File

@ -2471,7 +2471,8 @@ func testTrailingChecksums() {
PO minio.PutObjectOptions
}{
// Currently there is no way to override the checksum type.
{header: "x-amz-checksum-crc32c",
{
header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
@ -2481,7 +2482,8 @@ func testTrailingChecksums() {
PartSize: 5 << 20,
},
},
{header: "x-amz-checksum-crc32c",
{
header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
@ -2491,7 +2493,8 @@ func testTrailingChecksums() {
PartSize: 6_645_654, // Rather arbitrary size
},
},
{header: "x-amz-checksum-crc32c",
{
header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
@ -2501,7 +2504,8 @@ func testTrailingChecksums() {
PartSize: 5 << 20,
},
},
{header: "x-amz-checksum-crc32c",
{
header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{

View File

@ -308,19 +308,27 @@ func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartEle
}
// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
type ExpireDeleteMarker bool
type ExpireDeleteMarker ExpirationBoolean
// IsEnabled returns true if the auto delete-marker expiration is enabled
func (e ExpireDeleteMarker) IsEnabled() bool {
return bool(e)
}
// ExpirationBoolean represents an XML version of 'bool' type
type ExpirationBoolean bool
// MarshalXML encodes delete marker boolean into an XML form.
func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if !b {
return nil
}
type expireDeleteMarkerWrapper ExpireDeleteMarker
return e.EncodeElement(expireDeleteMarkerWrapper(b), startElement)
type booleanWrapper ExpirationBoolean
return e.EncodeElement(booleanWrapper(b), startElement)
}
// IsEnabled returns true if the auto delete-marker expiration is enabled
func (b ExpireDeleteMarker) IsEnabled() bool {
// IsEnabled returns true if the expiration boolean is enabled
func (b ExpirationBoolean) IsEnabled() bool {
return bool(b)
}
@ -330,6 +338,7 @@ type Expiration struct {
Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"`
DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"`
}
// MarshalJSON customizes json encoding by removing empty day/date specification.
@ -338,10 +347,12 @@ func (e Expiration) MarshalJSON() ([]byte, error) {
Date *ExpirationDate `json:"Date,omitempty"`
Days *ExpirationDays `json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"`
DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"`
}
newexp := expiration{
DeleteMarker: e.DeleteMarker,
DeleteAll: e.DeleteAll,
}
if !e.IsDaysNull() {
newexp.Days = &e.Days

View File

@ -203,6 +203,10 @@ func (tags *tagSet) set(key, value string, failOnExist bool) error {
return nil
}
func (tags tagSet) count() int {
return len(tags.tagMap)
}
func (tags tagSet) toMap() map[string]string {
m := make(map[string]string, len(tags.tagMap))
for key, value := range tags.tagMap {
@ -279,6 +283,11 @@ func (tags *Tags) Set(key, value string) error {
return tags.TagSet.set(key, value, false)
}
// Count - return number of tags accounted for
func (tags Tags) Count() int {
return tags.TagSet.count()
}
// ToMap returns copy of tags.
func (tags Tags) ToMap() map[string]string {
return tags.TagSet.toMap()

20
vendor/github.com/tidwall/gjson/LICENSE generated vendored Normal file
View File

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2016 Josh Baker
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

497
vendor/github.com/tidwall/gjson/README.md generated vendored Normal file
View File

@ -0,0 +1,497 @@
<p align="center">
<img
src="logo.png"
width="240" height="78" border="0" alt="GJSON">
<br>
<a href="https://godoc.org/github.com/tidwall/gjson"><img src="https://img.shields.io/badge/api-reference-blue.svg?style=flat-square" alt="GoDoc"></a>
<a href="https://tidwall.com/gjson-play"><img src="https://img.shields.io/badge/%F0%9F%8F%90-playground-9900cc.svg?style=flat-square" alt="GJSON Playground"></a>
<a href="SYNTAX.md"><img src="https://img.shields.io/badge/{}-syntax-33aa33.svg?style=flat-square" alt="GJSON Syntax"></a>
</p>
<p align="center">get json values quickly</a></p>
GJSON is a Go package that provides a [fast](#performance) and [simple](#get-a-value) way to get values from a json document.
It has features such as [one line retrieval](#get-a-value), [dot notation paths](#path-syntax), [iteration](#iterate-through-an-object-or-array), and [parsing json lines](#json-lines).
Also check out [SJSON](https://github.com/tidwall/sjson) for modifying json, and the [JJ](https://github.com/tidwall/jj) command line tool.
This README is a quick overview of how to use GJSON, for more information check out [GJSON Syntax](SYNTAX.md).
GJSON is also available for [Python](https://github.com/volans-/gjson-py) and [Rust](https://github.com/tidwall/gjson.rs)
Getting Started
===============
## Installing
To start using GJSON, install Go and run `go get`:
```sh
$ go get -u github.com/tidwall/gjson
```
This will retrieve the library.
## Get a value
Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". When the value is found it's returned immediately.
```go
package main
import "github.com/tidwall/gjson"
const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}`
func main() {
value := gjson.Get(json, "name.last")
println(value.String())
}
```
This will print:
```
Prichard
```
*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.*
## Path Syntax
Below is a quick overview of the path syntax, for more complete information please
check out [GJSON Syntax](SYNTAX.md).
A path is a series of keys separated by a dot.
A key may contain special wildcard characters '\*' and '?'.
To access an array value use the index as the key.
To get the number of elements in an array or to access a child path, use the '#' character.
The dot and wildcard characters can be escaped with '\\'.
```json
{
"name": {"first": "Tom", "last": "Anderson"},
"age":37,
"children": ["Sara","Alex","Jack"],
"fav.movie": "Deer Hunter",
"friends": [
{"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]},
{"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]},
{"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]}
]
}
```
```
"name.last" >> "Anderson"
"age" >> 37
"children" >> ["Sara","Alex","Jack"]
"children.#" >> 3
"children.1" >> "Alex"
"child*.2" >> "Jack"
"c?ildren.0" >> "Sara"
"fav\.movie" >> "Deer Hunter"
"friends.#.first" >> ["Dale","Roger","Jane"]
"friends.1.last" >> "Craig"
```
You can also query an array for the first match by using `#(...)`, or find all
matches with `#(...)#`. Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=`
comparison operators and the simple pattern matching `%` (like) and `!%`
(not like) operators.
```
friends.#(last=="Murphy").first >> "Dale"
friends.#(last=="Murphy")#.first >> ["Dale","Jane"]
friends.#(age>45)#.last >> ["Craig","Murphy"]
friends.#(first%"D*").last >> "Murphy"
friends.#(first!%"D*").last >> "Craig"
friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"]
```
*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was
changed in v1.3.0 as to avoid confusion with the new
[multipath](SYNTAX.md#multipaths) syntax. For backwards compatibility,
`#[...]` will continue to work until the next major release.*
## Result Type
GJSON supports the json types `string`, `number`, `bool`, and `null`.
Arrays and Objects are returned as their raw json types.
The `Result` type holds one of these:
```
bool, for JSON booleans
float64, for JSON numbers
string, for JSON string literals
nil, for JSON null
```
To directly access the value:
```go
result.Type // can be String, Number, True, False, Null, or JSON
result.Str // holds the string
result.Num // holds the float64 number
result.Raw // holds the raw json
result.Index // index of raw value in original json, zero means index unknown
result.Indexes // indexes of all the elements that match on a path containing the '#' query character.
```
There are a variety of handy functions that work on a result:
```go
result.Exists() bool
result.Value() interface{}
result.Int() int64
result.Uint() uint64
result.Float() float64
result.String() string
result.Bool() bool
result.Time() time.Time
result.Array() []gjson.Result
result.Map() map[string]gjson.Result
result.Get(path string) Result
result.ForEach(iterator func(key, value Result) bool)
result.Less(token Result, caseSensitive bool) bool
```
The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types:
```go
boolean >> bool
number >> float64
string >> string
null >> nil
array >> []interface{}
object >> map[string]interface{}
```
The `result.Array()` function returns back an array of values.
If the result represents a non-existent value, then an empty array will be returned.
If the result is not a JSON array, the return value will be an array containing one result.
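A small illustrative sketch of that behavior, again assuming `json` holds the example document from above:
```go
println(len(gjson.Get(json, "children").Array()))  // 3 elements
println(len(gjson.Get(json, "name.last").Array())) // 1, the single result is wrapped
println(len(gjson.Get(json, "missing").Array()))   // 0, the value does not exist
```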
### 64-bit integers
The `result.Int()` and `result.Uint()` calls are capable of reading all 64 bits, allowing for large JSON integers.
```go
result.Int() int64 // -9223372036854775808 to 9223372036854775807
result.Uint() uint64 // 0 to 18446744073709551615
```
## Modifiers and path chaining
New in version 1.2 is support for modifier functions and path chaining.
A modifier is a path component that performs custom processing on the
json.
Multiple paths can be "chained" together using the pipe character.
This is useful for getting results from a modified query.
For example, using the built-in `@reverse` modifier on the above json document,
we'll get the `children` array and reverse its order:
```
"children|@reverse" >> ["Jack","Alex","Sara"]
"children|@reverse|0" >> "Jack"
```
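As a brief sketch, the chained paths above can be evaluated from Go like any other path (assuming `json` holds the example document):
```go
println(gjson.Get(json, "children|@reverse").String())   // ["Jack","Alex","Sara"]
println(gjson.Get(json, "children|@reverse|0").String()) // Jack
```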
There are currently the following built-in modifiers:
- `@reverse`: Reverse an array or the members of an object.
- `@ugly`: Remove all whitespace from a json document.
- `@pretty`: Make the json document more human readable.
- `@this`: Returns the current element. It can be used to retrieve the root element.
- `@valid`: Ensure the json document is valid.
- `@flatten`: Flattens an array.
- `@join`: Joins multiple objects into a single object.
- `@keys`: Returns an array of keys for an object.
- `@values`: Returns an array of values for an object.
- `@tostr`: Converts json to a string. Wraps a json string.
- `@fromstr`: Converts a string from json. Unwraps a json string.
- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db).
### Modifier arguments
A modifier may accept an optional argument. The argument can be a valid JSON
document or just characters.
For example, the `@pretty` modifier takes a json object as its argument.
```
@pretty:{"sortKeys":true}
```
This makes the json pretty and orders all of its keys.
```json
{
"age":37,
"children": ["Sara","Alex","Jack"],
"fav.movie": "Deer Hunter",
"friends": [
{"age": 44, "first": "Dale", "last": "Murphy"},
{"age": 68, "first": "Roger", "last": "Craig"},
{"age": 47, "first": "Jane", "last": "Murphy"}
],
"name": {"first": "Tom", "last": "Anderson"}
}
```
*The full list of `@pretty` options is `sortKeys`, `indent`, `prefix`, and `width`.
Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.*
### Custom modifiers
You can also add custom modifiers.
For example, here we create a modifier that makes the entire json document upper
or lower case.
```go
gjson.AddModifier("case", func(json, arg string) string {
if arg == "upper" {
return strings.ToUpper(json)
}
if arg == "lower" {
return strings.ToLower(json)
}
return json
})
```
```
"children|@case:upper" >> ["SARA","ALEX","JACK"]
"children|@case:lower|@reverse" >> ["jack","alex","sara"]
```
## JSON Lines
There's support for [JSON Lines](http://jsonlines.org/) using the `..` prefix, which treats a multi-line document as an array.
For example:
```
{"name": "Gilbert", "age": 61}
{"name": "Alexa", "age": 34}
{"name": "May", "age": 57}
{"name": "Deloise", "age": 44}
```
```
..# >> 4
..1 >> {"name": "Alexa", "age": 34}
..3 >> {"name": "Deloise", "age": 44}
..#.name >> ["Gilbert","Alexa","May","Deloise"]
..#(name="May").age >> 57
```
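As a minimal sketch, the same paths can be evaluated from Go; `jsonLines` is an assumed variable holding the four-line document shown above:
```go
println(gjson.Get(jsonLines, "..#").String())      // 4
println(gjson.Get(jsonLines, "..#.name").String()) // ["Gilbert","Alexa","May","Deloise"]
```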
The `ForEachLine` function will iterate through JSON lines.
```go
gjson.ForEachLine(json, func(line gjson.Result) bool{
println(line.String())
return true
})
```
## Get nested array values
Suppose you want all the last names from the following json:
```json
{
"programmers": [
{
"firstName": "Janet",
"lastName": "McLaughlin",
}, {
"firstName": "Elliotte",
"lastName": "Hunter",
}, {
"firstName": "Jason",
"lastName": "Harold",
}
]
}
```
You would use the path "programmers.#.lastName" like so:
```go
result := gjson.Get(json, "programmers.#.lastName")
for _, name := range result.Array() {
println(name.String())
}
```
You can also query an object inside an array:
```go
name := gjson.Get(json, `programmers.#(lastName="Hunter").firstName`)
println(name.String()) // prints "Elliotte"
```
## Iterate through an object or array
The `ForEach` function allows for quickly iterating through an object or array.
The key and value are passed to the iterator function for objects.
Only the value is passed for arrays.
Returning `false` from an iterator will stop iteration.
```go
result := gjson.Get(json, "programmers")
result.ForEach(func(key, value gjson.Result) bool {
println(value.String())
return true // keep iterating
})
```
## Simple Parse and Get
There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result.
For example, all of these will return the same result:
```go
gjson.Parse(json).Get("name").Get("last")
gjson.Get(json, "name").Get("last")
gjson.Get(json, "name.last")
```
## Check for the existence of a value
Sometimes you just want to know if a value exists.
```go
value := gjson.Get(json, "name.last")
if !value.Exists() {
println("no last name")
} else {
println(value.String())
}
// Or as one step
if gjson.Get(json, "name.last").Exists() {
println("has a last name")
}
```
## Validate JSON
The `Get*` and `Parse*` functions expect that the json is well-formed. Bad json will not panic, but it may return unexpected results.
If you are consuming JSON from an unpredictable source then you may want to validate prior to using GJSON.
```go
if !gjson.Valid(json) {
return errors.New("invalid json")
}
value := gjson.Get(json, "name.last")
```
## Unmarshal to a map
To unmarshal to a `map[string]interface{}`:
```go
m, ok := gjson.Parse(json).Value().(map[string]interface{})
if !ok {
// not a map
}
```
## Working with Bytes
If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. This is preferred over `Get(string(data), path)`.
```go
var json []byte = ...
result := gjson.GetBytes(json, path)
```
If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern:
```go
var json []byte = ...
result := gjson.GetBytes(json, path)
var raw []byte
if result.Index > 0 {
raw = json[result.Index:result.Index+len(result.Raw)]
} else {
raw = []byte(result.Raw)
}
```
This is a best-effort, no-allocation sub-slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case `result.Raw` is converted to a `[]byte`.
## Get multiple values at once
The `GetMany` function can be used to get multiple values at the same time.
```go
results := gjson.GetMany(json, "name.first", "name.last", "age")
```
The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths.
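Because the results line up with the input paths, a common pattern is to iterate them together and use `Exists()` to detect missing values. A minimal sketch, assuming `json` holds the example document from the Path Syntax section:
```go
paths := []string{"name.first", "name.last", "age"}
results := gjson.GetMany(json, paths...)
for i, r := range results {
	if r.Exists() {
		println(paths[i], "=", r.String())
	}
}
```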
## Performance
Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/),
[ffjson](https://github.com/pquerna/ffjson),
[EasyJSON](https://github.com/mailru/easyjson),
[jsonparser](https://github.com/buger/jsonparser),
and [json-iterator](https://github.com/json-iterator/go)
```
BenchmarkGJSONGet-16 11644512 311 ns/op 0 B/op 0 allocs/op
BenchmarkGJSONUnmarshalMap-16 1122678 3094 ns/op 1920 B/op 26 allocs/op
BenchmarkJSONUnmarshalMap-16 516681 6810 ns/op 2944 B/op 69 allocs/op
BenchmarkJSONUnmarshalStruct-16 697053 5400 ns/op 928 B/op 13 allocs/op
BenchmarkJSONDecoder-16 330450 10217 ns/op 3845 B/op 160 allocs/op
BenchmarkFFJSONLexer-16 1424979 2585 ns/op 880 B/op 8 allocs/op
BenchmarkEasyJSONLexer-16 3000000 729 ns/op 501 B/op 5 allocs/op
BenchmarkJSONParserGet-16 3000000 366 ns/op 21 B/op 0 allocs/op
BenchmarkJSONIterator-16 3000000 869 ns/op 693 B/op 14 allocs/op
```
JSON document used:
```json
{
"widget": {
"debug": "on",
"window": {
"title": "Sample Konfabulator Widget",
"name": "main_window",
"width": 500,
"height": 500
},
"image": {
"src": "Images/Sun.png",
"hOffset": 250,
"vOffset": 250,
"alignment": "center"
},
"text": {
"data": "Click Here",
"size": 36,
"style": "bold",
"vOffset": 100,
"alignment": "center",
"onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
}
}
}
```
Each operation was rotated through one of the following search paths:
```
widget.window.name
widget.image.hOffset
widget.text.onMouseUp
```
*These benchmarks were run on a MacBook Pro 16" 2.4 GHz Intel Core i9 using Go 1.17 and can be found [here](https://github.com/tidwall/gjson-benchmarks).*

342
vendor/github.com/tidwall/gjson/SYNTAX.md generated vendored Normal file
View File

@ -0,0 +1,342 @@
# GJSON Path Syntax
A GJSON Path is a text string syntax that describes a search pattern for quickly retrieving values from a JSON payload.
This document is designed to explain the structure of a GJSON Path through examples.
- [Path structure](#path-structure)
- [Basic](#basic)
- [Wildcards](#wildcards)
- [Escape Character](#escape-character)
- [Arrays](#arrays)
- [Queries](#queries)
- [Dot vs Pipe](#dot-vs-pipe)
- [Modifiers](#modifiers)
- [Multipaths](#multipaths)
- [Literals](#literals)
The definitive implementation is [github.com/tidwall/gjson](https://github.com/tidwall/gjson).
Use the [GJSON Playground](https://gjson.dev) to experiment with the syntax online.
## Path structure
A GJSON Path is intended to be easily expressed as a series of components separated by a `.` character.
Along with the `.` character, there are a few more that have special meaning, including `|`, `#`, `@`, `\`, `*`, `!`, and `?`.
## Example
Given this JSON
```json
{
"name": {"first": "Tom", "last": "Anderson"},
"age":37,
"children": ["Sara","Alex","Jack"],
"fav.movie": "Deer Hunter",
"friends": [
{"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]},
{"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]},
{"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]}
]
}
```
The following GJSON Paths evaluate to the accompanying values.
### Basic
In many cases you'll just want to retrieve values by object name or array index.
```go
name.last "Anderson"
name.first "Tom"
age 37
children ["Sara","Alex","Jack"]
children.0 "Sara"
children.1 "Alex"
friends.1 {"first": "Roger", "last": "Craig", "age": 68}
friends.1.first "Roger"
```
### Wildcards
A key may contain the special wildcard characters `*` and `?`.
The `*` matches zero or more characters, and `?` matches exactly one character.
```go
child*.2 "Jack"
c?ildren.0 "Sara"
```
### Escape character
Special purpose characters, such as `.`, `*`, and `?` can be escaped with `\`.
```go
fav\.movie "Deer Hunter"
```
You'll also need to make sure that the `\` character is correctly escaped when hardcoding a path in your source code.
```go
// Go
val := gjson.Get(json, "fav\\.movie") // must escape the slash
val := gjson.Get(json, `fav\.movie`) // no need to escape the slash
```
```rust
// Rust
let val = gjson::get(json, "fav\\.movie") // must escape the slash
let val = gjson::get(json, r#"fav\.movie"#) // no need to escape the slash
```
### Arrays
The `#` character allows for digging into JSON Arrays.
To get the length of an array you'll just use the `#` all by itself.
```go
friends.# 3
friends.#.age [44,68,47]
```
### Queries
You can also query an array for the first match by using `#(...)`, or find all matches with `#(...)#`.
Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators,
and the simple pattern matching `%` (like) and `!%` (not like) operators.
```go
friends.#(last=="Murphy").first "Dale"
friends.#(last=="Murphy")#.first ["Dale","Jane"]
friends.#(age>45)#.last ["Craig","Murphy"]
friends.#(first%"D*").last "Murphy"
friends.#(first!%"D*").last "Craig"
```
To query for a non-object value in an array, you can forgo the string to the right of the operator.
```go
children.#(!%"*a*") "Alex"
children.#(%"*a*")# ["Sara","Jack"]
```
Nested queries are allowed.
```go
friends.#(nets.#(=="fb"))#.first  ["Dale","Roger"]
```
*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was
changed in v1.3.0 to avoid confusion with the new [multipath](#multipaths)
syntax. For backwards compatibility, `#[...]` will continue to work until the
next major release.*
The `~` (tilde) operator will convert a value to a boolean before comparison.
For example, using the following JSON:
```json
{
"vals": [
{ "a": 1, "b": true },
{ "a": 2, "b": true },
{ "a": 3, "b": false },
{ "a": 4, "b": "0" },
{ "a": 5, "b": 0 },
{ "a": 6, "b": "1" },
{ "a": 7, "b": 1 },
{ "a": 8, "b": "true" },
{ "a": 9, "b": false },
{ "a": 10, "b": null },
{ "a": 11 }
]
}
```
You can now query for all true(ish) or false(ish) values:
```
vals.#(b==~true)#.a >> [1,2,6,7,8]
vals.#(b==~false)#.a >> [3,4,5,9,10,11]
```
The last value, which is non-existent, is treated as `false`.
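As a small sketch, the same boolean queries can be issued from Go; `valsJSON` is an assumed variable holding the document above:
```go
println(gjson.Get(valsJSON, "vals.#(b==~true)#.a").String())  // [1,2,6,7,8]
println(gjson.Get(valsJSON, "vals.#(b==~false)#.a").String()) // [3,4,5,9,10,11]
```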
### Dot vs Pipe
The `.` is the standard separator, but it's also possible to use a `|`.
In most cases they both end up returning the same results.
The cases where `|` differs from `.` are when it's used after the `#` for [Arrays](#arrays) and [Queries](#queries).
Here are some examples:
```go
friends.0.first "Dale"
friends|0.first "Dale"
friends.0|first "Dale"
friends|0|first "Dale"
friends|# 3
friends.# 3
friends.#(last="Murphy")# [{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}]
friends.#(last="Murphy")#.first ["Dale","Jane"]
friends.#(last="Murphy")#|first <non-existent>
friends.#(last="Murphy")#.0 []
friends.#(last="Murphy")#|0 {"first": "Dale", "last": "Murphy", "age": 44}
friends.#(last="Murphy")#.# []
friends.#(last="Murphy")#|# 2
```
Let's break down a few of these.
The path `friends.#(last="Murphy")#` all by itself results in
```json
[{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}]
```
The `.first` suffix will process the `first` path on each array element *before* returning the results, which becomes
```json
["Dale","Jane"]
```
But the `|first` suffix processes the `first` path *after* the previous result.
Since the previous result is an array, not an object, the path cannot be processed
because `first` does not exist on an array.
Yet, the `|0` suffix returns
```json
{"first": "Dale", "last": "Murphy", "age": 44}
```
Because `0` is the first index of the previous result.
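A short sketch contrasting the two suffixes from Go, assuming `json` holds the example document from the top of this page:
```go
// ".first" maps the path over each element of the query result.
println(gjson.Get(json, `friends.#(last="Murphy")#.first`).String()) // ["Dale","Jane"]
// "|0" indexes into the query result itself.
println(gjson.Get(json, `friends.#(last="Murphy")#|0`).String())     // the first Murphy object
```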
### Modifiers
A modifier is a path component that performs custom processing on the JSON.
For example, using the built-in `@reverse` modifier on the above JSON payload will reverse the `children` array:
```go
children.@reverse ["Jack","Alex","Sara"]
children.@reverse.0 "Jack"
```
There are currently the following built-in modifiers:
- `@reverse`: Reverse an array or the members of an object.
- `@ugly`: Remove all whitespace from JSON.
- `@pretty`: Make the JSON more human readable.
- `@this`: Returns the current element. It can be used to retrieve the root element.
- `@valid`: Ensure the json document is valid.
- `@flatten`: Flattens an array.
- `@join`: Joins multiple objects into a single object.
- `@keys`: Returns an array of keys for an object.
- `@values`: Returns an array of values for an object.
- `@tostr`: Converts json to a string. Wraps a json string.
- `@fromstr`: Converts a string from json. Unwraps a json string.
- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db).
#### Modifier arguments
A modifier may accept an optional argument. The argument can be a valid JSON payload or just characters.
For example, the `@pretty` modifier takes a json object as its argument.
```
@pretty:{"sortKeys":true}
```
This makes the json pretty and orders all of its keys.
```json
{
"age":37,
"children": ["Sara","Alex","Jack"],
"fav.movie": "Deer Hunter",
"friends": [
{"age": 44, "first": "Dale", "last": "Murphy"},
{"age": 68, "first": "Roger", "last": "Craig"},
{"age": 47, "first": "Jane", "last": "Murphy"}
],
"name": {"first": "Tom", "last": "Anderson"}
}
```
*The full list of `@pretty` options is `sortKeys`, `indent`, `prefix`, and `width`.
Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.*
#### Custom modifiers
You can also add custom modifiers.
For example, here we create a modifier which makes the entire JSON payload upper or lower case.
```go
gjson.AddModifier("case", func(json, arg string) string {
if arg == "upper" {
return strings.ToUpper(json)
}
if arg == "lower" {
return strings.ToLower(json)
}
return json
})
"children.@case:upper" ["SARA","ALEX","JACK"]
"children.@case:lower.@reverse" ["jack","alex","sara"]
```
*Note: Custom modifiers are not yet available in the Rust version*
### Multipaths
Starting with v1.3.0, GJSON added the ability to join multiple paths together
to form new documents. Wrapping comma-separated paths between `[...]` or
`{...}` will result in a new array or object, respectively.
For example, using the given multipath:
```
{name.first,age,"the_murphys":friends.#(last="Murphy")#.first}
```
Here we selected the first name, age, and the first name for friends with the
last name "Murphy".
You'll notice that an optional key can be provided, in this case
"the_murphys", to force assign a key to a value. Otherwise, the name of the
actual field will be used, in this case "first". If a name cannot be
determined, then "_" is used.
This results in
```json
{"first":"Tom","age":37,"the_murphys":["Dale","Jane"]}
```
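A minimal sketch of evaluating that multipath from Go, assuming `json` holds the example document from the top of this page:
```go
doc := gjson.Get(json, `{name.first,age,"the_murphys":friends.#(last="Murphy")#.first}`)
println(doc.String()) // {"first":"Tom","age":37,"the_murphys":["Dale","Jane"]}
```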
### Literals
Starting with v1.12.0, GJSON added support for json literals, which provide a way for constructing static blocks of json. This can be particularly useful when constructing a new json document using [multipaths](#multipaths).
A json literal begins with the '!' declaration character.
For example, using the given multipath:
```
{name.first,age,"company":!"Happysoft","employed":!true}
```
Here we selected the first name and age, then added two new fields, "company" and "employed".
This results in
```json
{"first":"Tom","age":37,"company":"Happysoft","employed":true}
```
*See issue [#249](https://github.com/tidwall/gjson/issues/249) for additional context on JSON Literals.*

3359
vendor/github.com/tidwall/gjson/gjson.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

BIN
vendor/github.com/tidwall/gjson/logo.png generated vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

20
vendor/github.com/tidwall/match/LICENSE generated vendored Normal file
View File

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2016 Josh Baker
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

29
vendor/github.com/tidwall/match/README.md generated vendored Normal file
View File

@ -0,0 +1,29 @@
# Match
[![GoDoc](https://godoc.org/github.com/tidwall/match?status.svg)](https://godoc.org/github.com/tidwall/match)
Match is a very simple pattern matcher where '*' matches on any
number of characters and '?' matches on any one character.
## Installing
```
go get -u github.com/tidwall/match
```
## Example
```go
match.Match("hello", "*llo")
match.Match("jello", "?ello")
match.Match("hello", "h*o")
```
## Contact
Josh Baker [@tidwall](http://twitter.com/tidwall)
## License
Match source code is available under the MIT [License](/LICENSE).

237
vendor/github.com/tidwall/match/match.go generated vendored Normal file
View File

@ -0,0 +1,237 @@
// Package match provides a simple pattern matcher with unicode support.
package match
import (
"unicode/utf8"
)
// Match returns true if str matches pattern. This is a very
// simple wildcard match where '*' matches on any number of characters
// and '?' matches on any one character.
//
// pattern:
// { term }
// term:
// '*' matches any sequence of non-Separator characters
// '?' matches any single non-Separator character
// c matches character c (c != '*', '?', '\\')
// '\\' c matches character c
//
func Match(str, pattern string) bool {
if pattern == "*" {
return true
}
return match(str, pattern, 0, nil, -1) == rMatch
}
// MatchLimit is the same as Match but will limit the complexity of the match
// operation. This is to avoid long running matches, specifically to avoid ReDos
// attacks from arbitrary inputs.
//
// How it works:
// The underlying match routine is recursive and may call itself when it
// encounters a sandwiched wildcard pattern, such as: `user:*:name`.
// Every time it calls itself, a counter is incremented.
// The operation is stopped when counter > maxcomp*len(str).
func MatchLimit(str, pattern string, maxcomp int) (matched, stopped bool) {
if pattern == "*" {
return true, false
}
counter := 0
r := match(str, pattern, len(str), &counter, maxcomp)
if r == rStop {
return false, true
}
return r == rMatch, false
}
type result int
const (
rNoMatch result = iota
rMatch
rStop
)
func match(str, pat string, slen int, counter *int, maxcomp int) result {
// check complexity limit
if maxcomp > -1 {
if *counter > slen*maxcomp {
return rStop
}
*counter++
}
for len(pat) > 0 {
var wild bool
pc, ps := rune(pat[0]), 1
if pc > 0x7f {
pc, ps = utf8.DecodeRuneInString(pat)
}
var sc rune
var ss int
if len(str) > 0 {
sc, ss = rune(str[0]), 1
if sc > 0x7f {
sc, ss = utf8.DecodeRuneInString(str)
}
}
switch pc {
case '?':
if ss == 0 {
return rNoMatch
}
case '*':
// Ignore repeating stars.
for len(pat) > 1 && pat[1] == '*' {
pat = pat[1:]
}
// If this star is the last character then it must be a match.
if len(pat) == 1 {
return rMatch
}
// Match and trim any non-wildcard suffix characters.
var ok bool
str, pat, ok = matchTrimSuffix(str, pat)
if !ok {
return rNoMatch
}
// Check for single star again.
if len(pat) == 1 {
return rMatch
}
// Perform recursive wildcard search.
r := match(str, pat[1:], slen, counter, maxcomp)
if r != rNoMatch {
return r
}
if len(str) == 0 {
return rNoMatch
}
wild = true
default:
if ss == 0 {
return rNoMatch
}
if pc == '\\' {
pat = pat[ps:]
pc, ps = utf8.DecodeRuneInString(pat)
if ps == 0 {
return rNoMatch
}
}
if sc != pc {
return rNoMatch
}
}
str = str[ss:]
if !wild {
pat = pat[ps:]
}
}
if len(str) == 0 {
return rMatch
}
return rNoMatch
}
// matchTrimSuffix matches and trims any non-wildcard suffix characters.
// Returns the trimmed string and pattern.
//
// This is called because the pattern contains extra data after the wildcard
// star. Here we compare any suffix characters in the pattern to the suffix of
// the target string. Basically a reverse match that stops when a wildcard
// character is reached. This is a little trickier than a forward match because
// we need to evaluate an escaped character in reverse.
//
// Any matched characters will be trimmed from both the target
// string and the pattern.
func matchTrimSuffix(str, pat string) (string, string, bool) {
// It's expected that the pattern has at least two bytes and the first byte
// is a wildcard star '*'
match := true
for len(str) > 0 && len(pat) > 1 {
pc, ps := utf8.DecodeLastRuneInString(pat)
var esc bool
for i := 0; ; i++ {
if pat[len(pat)-ps-i-1] != '\\' {
if i&1 == 1 {
esc = true
ps++
}
break
}
}
if pc == '*' && !esc {
match = true
break
}
sc, ss := utf8.DecodeLastRuneInString(str)
if !((pc == '?' && !esc) || pc == sc) {
match = false
break
}
str = str[:len(str)-ss]
pat = pat[:len(pat)-ps]
}
return str, pat, match
}
var maxRuneBytes = [...]byte{244, 143, 191, 191}
// Allowable parses the pattern and determines the minimum and maximum allowable
// values that the pattern can represent.
// When the max cannot be determined, 'true' will be returned
// for infinite.
func Allowable(pattern string) (min, max string) {
if pattern == "" || pattern[0] == '*' {
return "", ""
}
minb := make([]byte, 0, len(pattern))
maxb := make([]byte, 0, len(pattern))
var wild bool
for i := 0; i < len(pattern); i++ {
if pattern[i] == '*' {
wild = true
break
}
if pattern[i] == '?' {
minb = append(minb, 0)
maxb = append(maxb, maxRuneBytes[:]...)
} else {
minb = append(minb, pattern[i])
maxb = append(maxb, pattern[i])
}
}
if wild {
r, n := utf8.DecodeLastRune(maxb)
if r != utf8.RuneError {
if r < utf8.MaxRune {
r++
if r > 0x7f {
b := make([]byte, 4)
nn := utf8.EncodeRune(b, r)
maxb = append(maxb[:len(maxb)-n], b[:nn]...)
} else {
maxb = append(maxb[:len(maxb)-n], byte(r))
}
}
}
}
return string(minb), string(maxb)
}
// IsPattern returns true if the string is a pattern.
func IsPattern(str string) bool {
for i := 0; i < len(str); i++ {
if str[i] == '*' || str[i] == '?' {
return true
}
}
return false
}

20
vendor/github.com/tidwall/pretty/LICENSE generated vendored Normal file
View File

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Josh Baker
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

122
vendor/github.com/tidwall/pretty/README.md generated vendored Normal file
View File

@ -0,0 +1,122 @@
# Pretty
[![GoDoc](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/tidwall/pretty)
Pretty is a Go package that provides [fast](#performance) methods for formatting JSON for human readability, or to compact JSON for smaller payloads.
Getting Started
===============
## Installing
To start using Pretty, install Go and run `go get`:
```sh
$ go get -u github.com/tidwall/pretty
```
This will retrieve the library.
## Pretty
Using this example:
```json
{"name": {"first":"Tom","last":"Anderson"}, "age":37,
"children": ["Sara","Alex","Jack"],
"fav.movie": "Deer Hunter", "friends": [
{"first": "Janet", "last": "Murphy", "age": 44}
]}
```
The following code:
```go
result = pretty.Pretty(example)
```
Will format the json to:
```json
{
"name": {
"first": "Tom",
"last": "Anderson"
},
"age": 37,
"children": ["Sara", "Alex", "Jack"],
"fav.movie": "Deer Hunter",
"friends": [
{
"first": "Janet",
"last": "Murphy",
"age": 44
}
]
}
```
## Color
Color will colorize the json for outputting to the screen.
```go
result = pretty.Color(json, nil)
```
Will add color to the result for printing to the terminal.
The second param is used for customizing the style, and passing nil will use the default `pretty.TerminalStyle`.
## Ugly
The following code:
```go
result = pretty.Ugly(example)
```
Will format the json to:
```json
{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"],"fav.movie":"Deer Hunter","friends":[{"first":"Janet","last":"Murphy","age":44}]}```
```
## Customized output
There's a `PrettyOptions(json, opts)` function which allows for customizing the output with the following options:
```go
type Options struct {
// Width is the max column width for single line arrays
// Default is 80
Width int
// Prefix is a prefix for all lines
// Default is an empty string
Prefix string
// Indent is the nested indentation
// Default is two spaces
Indent string
// SortKeys will sort the keys alphabetically
// Default is false
SortKeys bool
}
```
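A short sketch of passing custom options, under the assumption that `example` holds the JSON shown in the Pretty section above:
```go
opts := &pretty.Options{Width: 40, Indent: "    ", SortKeys: true}
result := pretty.PrettyOptions(example, opts)
println(string(result))
```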
## Performance
Benchmarks of Pretty alongside the builtin `encoding/json` Indent/Compact methods.
```
BenchmarkPretty-16 1000000 1034 ns/op 720 B/op 2 allocs/op
BenchmarkPrettySortKeys-16 586797 1983 ns/op 2848 B/op 14 allocs/op
BenchmarkUgly-16 4652365 254 ns/op 240 B/op 1 allocs/op
BenchmarkUglyInPlace-16 6481233 183 ns/op 0 B/op 0 allocs/op
BenchmarkJSONIndent-16 450654 2687 ns/op 1221 B/op 0 allocs/op
BenchmarkJSONCompact-16 685111 1699 ns/op 442 B/op 0 allocs/op
```
*These benchmarks were run on a MacBook Pro 2.4 GHz 8-Core Intel Core i9.*
## Contact
Josh Baker [@tidwall](http://twitter.com/tidwall)
## License
Pretty source code is available under the MIT [License](/LICENSE).

682
vendor/github.com/tidwall/pretty/pretty.go generated vendored Normal file
View File

@ -0,0 +1,682 @@
package pretty
import (
"bytes"
"encoding/json"
"sort"
"strconv"
)
// Options is Pretty options
type Options struct {
// Width is the max column width for single line arrays
// Default is 80
Width int
// Prefix is a prefix for all lines
// Default is an empty string
Prefix string
// Indent is the nested indentation
// Default is two spaces
Indent string
// SortKeys will sort the keys alphabetically
// Default is false
SortKeys bool
}
// DefaultOptions is the default options for pretty formats.
var DefaultOptions = &Options{Width: 80, Prefix: "", Indent: " ", SortKeys: false}
// Pretty converts the input json into a more human readable format where each
// element is on its own line with clear indentation.
func Pretty(json []byte) []byte { return PrettyOptions(json, nil) }
// PrettyOptions is like Pretty but with customized options.
func PrettyOptions(json []byte, opts *Options) []byte {
if opts == nil {
opts = DefaultOptions
}
buf := make([]byte, 0, len(json))
if len(opts.Prefix) != 0 {
buf = append(buf, opts.Prefix...)
}
buf, _, _, _ = appendPrettyAny(buf, json, 0, true,
opts.Width, opts.Prefix, opts.Indent, opts.SortKeys,
0, 0, -1)
if len(buf) > 0 {
buf = append(buf, '\n')
}
return buf
}
// Ugly removes insignificant space characters from the input json byte slice
// and returns the compacted result.
func Ugly(json []byte) []byte {
buf := make([]byte, 0, len(json))
return ugly(buf, json)
}
// UglyInPlace removes insignificant space characters from the input json
// byte slice and returns the compacted result. This method reuses the
// input json buffer to avoid allocations. Do not use the original bytes
// slice upon return.
func UglyInPlace(json []byte) []byte { return ugly(json, json) }
func ugly(dst, src []byte) []byte {
dst = dst[:0]
for i := 0; i < len(src); i++ {
if src[i] > ' ' {
dst = append(dst, src[i])
if src[i] == '"' {
for i = i + 1; i < len(src); i++ {
dst = append(dst, src[i])
if src[i] == '"' {
j := i - 1
for ; ; j-- {
if src[j] != '\\' {
break
}
}
if (j-i)%2 != 0 {
break
}
}
}
}
}
}
return dst
}
func isNaNOrInf(src []byte) bool {
return src[0] == 'i' || //Inf
src[0] == 'I' || // inf
src[0] == '+' || // +Inf
src[0] == 'N' || // Nan
(src[0] == 'n' && len(src) > 1 && src[1] != 'u') // nan
}
func appendPrettyAny(buf, json []byte, i int, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) {
for ; i < len(json); i++ {
if json[i] <= ' ' {
continue
}
if json[i] == '"' {
return appendPrettyString(buf, json, i, nl)
}
if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' || isNaNOrInf(json[i:]) {
return appendPrettyNumber(buf, json, i, nl)
}
if json[i] == '{' {
return appendPrettyObject(buf, json, i, '{', '}', pretty, width, prefix, indent, sortkeys, tabs, nl, max)
}
if json[i] == '[' {
return appendPrettyObject(buf, json, i, '[', ']', pretty, width, prefix, indent, sortkeys, tabs, nl, max)
}
switch json[i] {
case 't':
return append(buf, 't', 'r', 'u', 'e'), i + 4, nl, true
case 'f':
return append(buf, 'f', 'a', 'l', 's', 'e'), i + 5, nl, true
case 'n':
return append(buf, 'n', 'u', 'l', 'l'), i + 4, nl, true
}
}
return buf, i, nl, true
}
type pair struct {
kstart, kend int
vstart, vend int
}
type byKeyVal struct {
sorted bool
json []byte
buf []byte
pairs []pair
}
func (arr *byKeyVal) Len() int {
return len(arr.pairs)
}
func (arr *byKeyVal) Less(i, j int) bool {
if arr.isLess(i, j, byKey) {
return true
}
if arr.isLess(j, i, byKey) {
return false
}
return arr.isLess(i, j, byVal)
}
func (arr *byKeyVal) Swap(i, j int) {
arr.pairs[i], arr.pairs[j] = arr.pairs[j], arr.pairs[i]
arr.sorted = true
}
type byKind int
const (
byKey byKind = 0
byVal byKind = 1
)
type jtype int
const (
jnull jtype = iota
jfalse
jnumber
jstring
jtrue
jjson
)
func getjtype(v []byte) jtype {
if len(v) == 0 {
return jnull
}
switch v[0] {
case '"':
return jstring
case 'f':
return jfalse
case 't':
return jtrue
case 'n':
return jnull
case '[', '{':
return jjson
default:
return jnumber
}
}
func (arr *byKeyVal) isLess(i, j int, kind byKind) bool {
k1 := arr.json[arr.pairs[i].kstart:arr.pairs[i].kend]
k2 := arr.json[arr.pairs[j].kstart:arr.pairs[j].kend]
var v1, v2 []byte
if kind == byKey {
v1 = k1
v2 = k2
} else {
v1 = bytes.TrimSpace(arr.buf[arr.pairs[i].vstart:arr.pairs[i].vend])
v2 = bytes.TrimSpace(arr.buf[arr.pairs[j].vstart:arr.pairs[j].vend])
if len(v1) >= len(k1)+1 {
v1 = bytes.TrimSpace(v1[len(k1)+1:])
}
if len(v2) >= len(k2)+1 {
v2 = bytes.TrimSpace(v2[len(k2)+1:])
}
}
t1 := getjtype(v1)
t2 := getjtype(v2)
if t1 < t2 {
return true
}
if t1 > t2 {
return false
}
if t1 == jstring {
s1 := parsestr(v1)
s2 := parsestr(v2)
return string(s1) < string(s2)
}
if t1 == jnumber {
n1, _ := strconv.ParseFloat(string(v1), 64)
n2, _ := strconv.ParseFloat(string(v2), 64)
return n1 < n2
}
return string(v1) < string(v2)
}
func parsestr(s []byte) []byte {
for i := 1; i < len(s); i++ {
if s[i] == '\\' {
var str string
json.Unmarshal(s, &str)
return []byte(str)
}
if s[i] == '"' {
return s[1:i]
}
}
return nil
}
func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) {
var ok bool
if width > 0 {
if pretty && open == '[' && max == -1 {
// here we try to create a single line array
max := width - (len(buf) - nl)
if max > 3 {
s1, s2 := len(buf), i
buf, i, _, ok = appendPrettyObject(buf, json, i, '[', ']', false, width, prefix, "", sortkeys, 0, 0, max)
if ok && len(buf)-s1 <= max {
return buf, i, nl, true
}
buf = buf[:s1]
i = s2
}
} else if max != -1 && open == '{' {
return buf, i, nl, false
}
}
buf = append(buf, open)
i++
var pairs []pair
if open == '{' && sortkeys {
pairs = make([]pair, 0, 8)
}
var n int
for ; i < len(json); i++ {
if json[i] <= ' ' {
continue
}
if json[i] == close {
if pretty {
if open == '{' && sortkeys {
buf = sortPairs(json, buf, pairs)
}
if n > 0 {
nl = len(buf)
if buf[nl-1] == ' ' {
buf[nl-1] = '\n'
} else {
buf = append(buf, '\n')
}
}
if buf[len(buf)-1] != open {
buf = appendTabs(buf, prefix, indent, tabs)
}
}
buf = append(buf, close)
return buf, i + 1, nl, open != '{'
}
if open == '[' || json[i] == '"' {
if n > 0 {
buf = append(buf, ',')
if width != -1 && open == '[' {
buf = append(buf, ' ')
}
}
var p pair
if pretty {
nl = len(buf)
if buf[nl-1] == ' ' {
buf[nl-1] = '\n'
} else {
buf = append(buf, '\n')
}
if open == '{' && sortkeys {
p.kstart = i
p.vstart = len(buf)
}
buf = appendTabs(buf, prefix, indent, tabs+1)
}
if open == '{' {
buf, i, nl, _ = appendPrettyString(buf, json, i, nl)
if sortkeys {
p.kend = i
}
buf = append(buf, ':')
if pretty {
buf = append(buf, ' ')
}
}
buf, i, nl, ok = appendPrettyAny(buf, json, i, pretty, width, prefix, indent, sortkeys, tabs+1, nl, max)
if max != -1 && !ok {
return buf, i, nl, false
}
if pretty && open == '{' && sortkeys {
p.vend = len(buf)
if p.kstart > p.kend || p.vstart > p.vend {
// bad data. disable sorting
sortkeys = false
} else {
pairs = append(pairs, p)
}
}
i--
n++
}
}
return buf, i, nl, open != '{'
}
func sortPairs(json, buf []byte, pairs []pair) []byte {
if len(pairs) == 0 {
return buf
}
vstart := pairs[0].vstart
vend := pairs[len(pairs)-1].vend
arr := byKeyVal{false, json, buf, pairs}
sort.Stable(&arr)
if !arr.sorted {
return buf
}
nbuf := make([]byte, 0, vend-vstart)
for i, p := range pairs {
nbuf = append(nbuf, buf[p.vstart:p.vend]...)
if i < len(pairs)-1 {
nbuf = append(nbuf, ',')
nbuf = append(nbuf, '\n')
}
}
return append(buf[:vstart], nbuf...)
}
func appendPrettyString(buf, json []byte, i, nl int) ([]byte, int, int, bool) {
s := i
i++
for ; i < len(json); i++ {
if json[i] == '"' {
var sc int
for j := i - 1; j > s; j-- {
if json[j] == '\\' {
sc++
} else {
break
}
}
if sc%2 == 1 {
continue
}
i++
break
}
}
return append(buf, json[s:i]...), i, nl, true
}
func appendPrettyNumber(buf, json []byte, i, nl int) ([]byte, int, int, bool) {
s := i
i++
for ; i < len(json); i++ {
if json[i] <= ' ' || json[i] == ',' || json[i] == ':' || json[i] == ']' || json[i] == '}' {
break
}
}
return append(buf, json[s:i]...), i, nl, true
}
func appendTabs(buf []byte, prefix, indent string, tabs int) []byte {
if len(prefix) != 0 {
buf = append(buf, prefix...)
}
if len(indent) == 2 && indent[0] == ' ' && indent[1] == ' ' {
for i := 0; i < tabs; i++ {
buf = append(buf, ' ', ' ')
}
} else {
for i := 0; i < tabs; i++ {
buf = append(buf, indent...)
}
}
return buf
}
// Style is the color style
type Style struct {
Key, String, Number [2]string
True, False, Null [2]string
Escape [2]string
Brackets [2]string
Append func(dst []byte, c byte) []byte
}
func hexp(p byte) byte {
switch {
case p < 10:
return p + '0'
default:
return (p - 10) + 'a'
}
}
// TerminalStyle is for terminals
var TerminalStyle *Style
func init() {
TerminalStyle = &Style{
Key: [2]string{"\x1B[1m\x1B[94m", "\x1B[0m"},
String: [2]string{"\x1B[32m", "\x1B[0m"},
Number: [2]string{"\x1B[33m", "\x1B[0m"},
True: [2]string{"\x1B[36m", "\x1B[0m"},
False: [2]string{"\x1B[36m", "\x1B[0m"},
Null: [2]string{"\x1B[2m", "\x1B[0m"},
Escape: [2]string{"\x1B[35m", "\x1B[0m"},
Brackets: [2]string{"\x1B[1m", "\x1B[0m"},
Append: func(dst []byte, c byte) []byte {
if c < ' ' && (c != '\r' && c != '\n' && c != '\t' && c != '\v') {
dst = append(dst, "\\u00"...)
dst = append(dst, hexp((c>>4)&0xF))
return append(dst, hexp((c)&0xF))
}
return append(dst, c)
},
}
}
// Color will colorize the json. The style param is used for customizing
// the colors. Passing nil to the style param will use the default
// TerminalStyle.
func Color(src []byte, style *Style) []byte {
if style == nil {
style = TerminalStyle
}
apnd := style.Append
if apnd == nil {
apnd = func(dst []byte, c byte) []byte {
return append(dst, c)
}
}
type stackt struct {
kind byte
key bool
}
var dst []byte
var stack []stackt
for i := 0; i < len(src); i++ {
if src[i] == '"' {
key := len(stack) > 0 && stack[len(stack)-1].key
if key {
dst = append(dst, style.Key[0]...)
} else {
dst = append(dst, style.String[0]...)
}
dst = apnd(dst, '"')
esc := false
uesc := 0
for i = i + 1; i < len(src); i++ {
if src[i] == '\\' {
if key {
dst = append(dst, style.Key[1]...)
} else {
dst = append(dst, style.String[1]...)
}
dst = append(dst, style.Escape[0]...)
dst = apnd(dst, src[i])
esc = true
if i+1 < len(src) && src[i+1] == 'u' {
uesc = 5
} else {
uesc = 1
}
} else if esc {
dst = apnd(dst, src[i])
if uesc == 1 {
esc = false
dst = append(dst, style.Escape[1]...)
if key {
dst = append(dst, style.Key[0]...)
} else {
dst = append(dst, style.String[0]...)
}
} else {
uesc--
}
} else {
dst = apnd(dst, src[i])
}
if src[i] == '"' {
j := i - 1
for ; ; j-- {
if src[j] != '\\' {
break
}
}
if (j-i)%2 != 0 {
break
}
}
}
if esc {
dst = append(dst, style.Escape[1]...)
} else if key {
dst = append(dst, style.Key[1]...)
} else {
dst = append(dst, style.String[1]...)
}
} else if src[i] == '{' || src[i] == '[' {
stack = append(stack, stackt{src[i], src[i] == '{'})
dst = append(dst, style.Brackets[0]...)
dst = apnd(dst, src[i])
dst = append(dst, style.Brackets[1]...)
} else if (src[i] == '}' || src[i] == ']') && len(stack) > 0 {
stack = stack[:len(stack)-1]
dst = append(dst, style.Brackets[0]...)
dst = apnd(dst, src[i])
dst = append(dst, style.Brackets[1]...)
} else if (src[i] == ':' || src[i] == ',') && len(stack) > 0 && stack[len(stack)-1].kind == '{' {
stack[len(stack)-1].key = !stack[len(stack)-1].key
dst = append(dst, style.Brackets[0]...)
dst = apnd(dst, src[i])
dst = append(dst, style.Brackets[1]...)
} else {
var kind byte
if (src[i] >= '0' && src[i] <= '9') || src[i] == '-' || isNaNOrInf(src[i:]) {
kind = '0'
dst = append(dst, style.Number[0]...)
} else if src[i] == 't' {
kind = 't'
dst = append(dst, style.True[0]...)
} else if src[i] == 'f' {
kind = 'f'
dst = append(dst, style.False[0]...)
} else if src[i] == 'n' {
kind = 'n'
dst = append(dst, style.Null[0]...)
} else {
dst = apnd(dst, src[i])
}
if kind != 0 {
for ; i < len(src); i++ {
if src[i] <= ' ' || src[i] == ',' || src[i] == ':' || src[i] == ']' || src[i] == '}' {
i--
break
}
dst = apnd(dst, src[i])
}
if kind == '0' {
dst = append(dst, style.Number[1]...)
} else if kind == 't' {
dst = append(dst, style.True[1]...)
} else if kind == 'f' {
dst = append(dst, style.False[1]...)
} else if kind == 'n' {
dst = append(dst, style.Null[1]...)
}
}
}
}
return dst
}
// Spec strips out comments and trailing commas and converts the input to a
// valid JSON per the official spec: https://tools.ietf.org/html/rfc8259
//
// The resulting JSON will always be the same length as the input and it will
// include all of the same line breaks at matching offsets. This is to ensure
// the result can be later processed by an external parser and that the
// parser will report messages or errors with the correct offsets.
func Spec(src []byte) []byte {
return spec(src, nil)
}
// SpecInPlace is the same as Spec, but this method reuses the input json
// buffer to avoid allocations. Do not use the original bytes slice upon return.
func SpecInPlace(src []byte) []byte {
return spec(src, src)
}
func spec(src, dst []byte) []byte {
dst = dst[:0]
for i := 0; i < len(src); i++ {
if src[i] == '/' {
if i < len(src)-1 {
if src[i+1] == '/' {
dst = append(dst, ' ', ' ')
i += 2
for ; i < len(src); i++ {
if src[i] == '\n' {
dst = append(dst, '\n')
break
} else if src[i] == '\t' || src[i] == '\r' {
dst = append(dst, src[i])
} else {
dst = append(dst, ' ')
}
}
continue
}
if src[i+1] == '*' {
dst = append(dst, ' ', ' ')
i += 2
for ; i < len(src)-1; i++ {
if src[i] == '*' && src[i+1] == '/' {
dst = append(dst, ' ', ' ')
i++
break
} else if src[i] == '\n' || src[i] == '\t' ||
src[i] == '\r' {
dst = append(dst, src[i])
} else {
dst = append(dst, ' ')
}
}
continue
}
}
}
dst = append(dst, src[i])
if src[i] == '"' {
for i = i + 1; i < len(src); i++ {
dst = append(dst, src[i])
if src[i] == '"' {
j := i - 1
for ; ; j-- {
if src[j] != '\\' {
break
}
}
if (j-i)%2 != 0 {
break
}
}
}
} else if src[i] == '}' || src[i] == ']' {
for j := len(dst) - 2; j >= 0; j-- {
if dst[j] <= ' ' {
continue
}
if dst[j] == ',' {
dst[j] = ' '
}
break
}
}
}
return dst
}

View File

@ -382,7 +382,7 @@ func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLo
}
}
for _, schemaArg := range dirDefinition.Arguments {
if schemaArg.Type.NonNull {
if schemaArg.Type.NonNull && schemaArg.DefaultValue == nil {
if arg := dir.Arguments.ForName(schemaArg.Name); arg == nil || arg.Value.Kind == NullValue {
return gqlerror.ErrorPosf(dir.Position, "Argument %s for directive %s cannot be null.", schemaArg.Name, dir.Name)
}

6
vendor/github.com/zeebo/blake3/.gitignore generated vendored Normal file
View File

@ -0,0 +1,6 @@
*.pprof
*.test
*.txt
*.out
/upstream

125
vendor/github.com/zeebo/blake3/LICENSE generated vendored Normal file
View File

@ -0,0 +1,125 @@
This work is released into the public domain with CC0 1.0.
-------------------------------------------------------------------------------
Creative Commons Legal Code
CC0 1.0 Universal
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
HEREUNDER.
Statement of Purpose
The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator
and subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").
Certain owners wish to permanently relinquish those rights to a Work for
the purpose of contributing to a commons of creative, cultural and
scientific works ("Commons") that the public can reliably and without fear
of later claims of infringement build upon, modify, incorporate in other
works, reuse and redistribute as freely as possible in any form whatsoever
and for any purposes, including without limitation commercial purposes.
These owners may contribute to the Commons to promote the ideal of a free
culture and the further production of creative, cultural and scientific
works, or to gain reputation or greater distribution for their Work in
part through the use and efforts of others.
For these and/or other purposes and motivations, and without any
expectation of additional consideration or compensation, the person
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
is an owner of Copyright and Related Rights in the Work, voluntarily
elects to apply CC0 to the Work and publicly distribute the Work under its
terms, with knowledge of his or her Copyright and Related Rights in the
Work and the meaning and intended legal effect of CC0 on those rights.
1. Copyright and Related Rights. A Work made available under CC0 may be
protected by copyright and related or neighboring rights ("Copyright and
Related Rights"). Copyright and Related Rights include, but are not
limited to, the following:
i. the right to reproduce, adapt, distribute, perform, display,
communicate, and translate a Work;
ii. moral rights retained by the original author(s) and/or performer(s);
iii. publicity and privacy rights pertaining to a person's image or
likeness depicted in a Work;
iv. rights protecting against unfair competition in regards to a Work,
subject to the limitations in paragraph 4(a), below;
v. rights protecting the extraction, dissemination, use and reuse of data
in a Work;
vi. database rights (such as those arising under Directive 96/9/EC of the
European Parliament and of the Council of 11 March 1996 on the legal
protection of databases, and under any national implementation
thereof, including any amended or successor version of such
directive); and
vii. other similar, equivalent or corresponding rights throughout the
world based on applicable law or treaty, and any national
implementations thereof.
2. Waiver. To the greatest extent permitted by, but not in contravention
of, applicable law, Affirmer hereby overtly, fully, permanently,
irrevocably and unconditionally waives, abandons, and surrenders all of
Affirmer's Copyright and Related Rights and associated claims and causes
of action, whether now known or unknown (including existing as well as
future claims and causes of action), in the Work (i) in all territories
worldwide, (ii) for the maximum duration provided by applicable law or
treaty (including future time extensions), (iii) in any current or future
medium and for any number of copies, and (iv) for any purpose whatsoever,
including without limitation commercial, advertising or promotional
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
member of the public at large and to the detriment of Affirmer's heirs and
successors, fully intending that such Waiver shall not be subject to
revocation, rescission, cancellation, termination, or any other legal or
equitable action to disrupt the quiet enjoyment of the Work by the public
as contemplated by Affirmer's express Statement of Purpose.
3. Public License Fallback. Should any part of the Waiver for any reason
be judged legally invalid or ineffective under applicable law, then the
Waiver shall be preserved to the maximum extent permitted taking into
account Affirmer's express Statement of Purpose. In addition, to the
extent the Waiver is so judged Affirmer hereby grants to each affected
person a royalty-free, non transferable, non sublicensable, non exclusive,
irrevocable and unconditional license to exercise Affirmer's Copyright and
Related Rights in the Work (i) in all territories worldwide, (ii) for the
maximum duration provided by applicable law or treaty (including future
time extensions), (iii) in any current or future medium and for any number
of copies, and (iv) for any purpose whatsoever, including without
limitation commercial, advertising or promotional purposes (the
"License"). The License shall be deemed effective as of the date CC0 was
applied by Affirmer to the Work. Should any part of the License for any
reason be judged legally invalid or ineffective under applicable law, such
partial invalidity or ineffectiveness shall not invalidate the remainder
of the License, and in such case Affirmer hereby affirms that he or she
will not (i) exercise any of his or her remaining Copyright and Related
Rights in the Work or (ii) assert any associated claims and causes of
action with respect to the Work, in either case contrary to Affirmer's
express Statement of Purpose.
4. Limitations and Disclaimers.
a. No trademark or patent rights held by Affirmer are waived, abandoned,
surrendered, licensed or otherwise affected by this document.
b. Affirmer offers the Work as-is and makes no representations or
warranties of any kind concerning the Work, express, implied,
statutory or otherwise, including without limitation warranties of
title, merchantability, fitness for a particular purpose, non
infringement, or the absence of latent or other defects, accuracy, or
the present or absence of errors, whether or not discoverable, all to
the greatest extent permissible under applicable law.
c. Affirmer disclaims responsibility for clearing rights of other persons
that may apply to the Work or any use thereof, including without
limitation any person's Copyright and Related Rights in the Work.
Further, Affirmer disclaims responsibility for obtaining any necessary
consents, permissions or other rights required for any use of the
Work.
d. Affirmer understands and acknowledges that Creative Commons is not a
party to this document and has no duty or obligation with respect to
this CC0 or use of the Work.

34
vendor/github.com/zeebo/blake3/Makefile generated vendored Normal file
View File

@ -0,0 +1,34 @@
asm: internal/alg/hash/hash_avx2/impl_amd64.s internal/alg/compress/compress_sse41/impl_amd64.s
internal/alg/hash/hash_avx2/impl_amd64.s: avo/avx2/*.go
( cd avo; go run ./avx2 ) > internal/alg/hash/hash_avx2/impl_amd64.s
internal/alg/compress/compress_sse41/impl_amd64.s: avo/sse41/*.go
( cd avo; go run ./sse41 ) > internal/alg/compress/compress_sse41/impl_amd64.s
.PHONY: fmt
fmt:
go fmt ./...
.PHONY: clean
clean:
rm -f internal/alg/hash/hash_avx2/impl_amd64.s
rm -f internal/alg/compress/compress_sse41/impl_amd64.s
.PHONY: test
test:
go test -race -bench=. -benchtime=1x
.PHONY: vet
vet:
GOOS=linux GOARCH=386 GO386=softfloat go vet ./...
GOOS=windows GOARCH=386 GO386=softfloat go vet ./...
GOOS=linux GOARCH=amd64 go vet ./...
GOOS=windows GOARCH=amd64 go vet ./...
GOOS=darwin GOARCH=amd64 go vet ./...
GOOS=linux GOARCH=arm go vet ./...
GOOS=linux GOARCH=arm64 go vet ./...
GOOS=windows GOARCH=arm64 go vet ./...
GOOS=darwin GOARCH=arm64 go vet ./...
GOOS=js GOARCH=wasm go vet ./...
GOOS=linux GOARCH=mips go vet ./...

77
vendor/github.com/zeebo/blake3/README.md generated vendored Normal file
View File

@ -0,0 +1,77 @@
# BLAKE3
<p>
<a href="https://pkg.go.dev/github.com/zeebo/blake3"><img src="https://img.shields.io/badge/doc-reference-007d9b?logo=go&style=flat-square" alt="go.dev" /></a>
<a href="https://goreportcard.com/report/github.com/zeebo/blake3"><img src="https://goreportcard.com/badge/github.com/zeebo/blake3?style=flat-square" alt="Go Report Card" /></a>
<a href="https://sourcegraph.com/github.com/zeebo/blake3?badge"><img src="https://sourcegraph.com/github.com/zeebo/blake3/-/badge.svg?style=flat-square" alt="SourceGraph" /></a>
</p>
Pure Go implementation of [BLAKE3](https://blake3.io) with AVX2 and SSE4.1 acceleration.
Special thanks to the excellent [avo](https://github.com/mmcloughlin/avo) for making writing the vectorized version much easier.
# Benchmarks
## Caveats
This library makes some different design decisions than the upstream Rust crate around internal buffering. Specifically, because it does not target the embedded system space, nor does it support multithreading, it elects to do its own internal buffering. This means that a user does not have to worry about providing large enough buffers to get the best possible performance, but it does worse on smaller input sizes. So some notes:
- The Rust benchmarks below are all single-threaded to match this Go implementation.
- I make no attempt to get precise measurements (cpu throttling, noisy environment, etc.) so please benchmark on your own systems.
- These benchmarks are run on an i7-6700K which does not support AVX-512, so Rust is limited to AVX2 at sizes above 8 kib.
- I tried my best to make them benchmark the same thing, but who knows? :smile:
## Charts
In this case, both libraries are able to avoid a lot of data copying and will use vectorized instructions to hash as fast as possible, and perform similarly.
![Large Full Buffer](/assets/large-full-buffer.svg)
For incremental writes, you must provide the Rust version large enough buffers so that it can use vectorized instructions. This Go library performs consistently regardless of the size being sent into the update function.
![Incremental](/assets/incremental.svg)
The downside of internal buffering is most apparent with small sizes as most time is spent initializing the hasher state. In terms of hashing rate, the difference is 3-4x, but in an absolute sense it's ~100ns (see tables below). If you wish to hash a large number of very small strings and you care about those nanoseconds, be sure to use the Reset method to avoid re-initializing the state.
![Small Full Buffer](/assets/small-full-buffer.svg)
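A minimal sketch of that Reset pattern, based on the `Hasher` API in `api.go`; the input strings here are hypothetical:
```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/zeebo/blake3"
)

func main() {
	h := blake3.New()
	for _, s := range []string{"alpha", "beta", "gamma"} {
		h.Reset() // reuse the hasher state instead of allocating a new one
		_, _ = h.WriteString(s)
		fmt.Println(s, hex.EncodeToString(h.Sum(nil)))
	}
}
```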
## Timing Tables
### Small
| Size | Full Buffer | Reset | | Full Buffer Rate | Reset Rate |
|--------|-------------|------------|-|------------------|--------------|
| 64 b | `205ns` | `86.5ns` | | `312MB/s` | `740MB/s` |
| 256 b | `364ns` | `250ns` | | `703MB/s` | `1.03GB/s` |
| 512 b | `575ns` | `468ns` | | `892MB/s` | `1.10GB/s` |
| 768 b | `795ns` | `682ns` | | `967MB/s` | `1.13GB/s` |
### Large
| Size | Incremental | Full Buffer | Reset | | Incremental Rate | Full Buffer Rate | Reset Rate |
|----------|-------------|-------------|------------|-|------------------|------------------|--------------|
| 1 kib | `1.02µs` | `1.01µs` | `891ns` | | `1.00GB/s` | `1.01GB/s` | `1.15GB/s` |
| 2 kib | `2.11µs` | `2.07µs` | `1.95µs` | | `968MB/s` | `990MB/s` | `1.05GB/s` |
| 4 kib | `2.28µs` | `2.15µs` | `2.05µs` | | `1.80GB/s` | `1.90GB/s` | `2.00GB/s` |
| 8 kib | `2.64µs` | `2.52µs` | `2.44µs` | | `3.11GB/s` | `3.25GB/s` | `3.36GB/s` |
| 16 kib | `4.93µs` | `4.54µs` | `4.48µs` | | `3.33GB/s` | `3.61GB/s` | `3.66GB/s` |
| 32 kib | `9.41µs` | `8.62µs` | `8.54µs` | | `3.48GB/s` | `3.80GB/s` | `3.84GB/s` |
| 64 kib | `18.2µs` | `16.7µs` | `16.6µs` | | `3.59GB/s` | `3.91GB/s` | `3.94GB/s` |
| 128 kib | `36.3µs` | `32.9µs` | `33.1µs` | | `3.61GB/s` | `3.99GB/s` | `3.96GB/s` |
| 256 kib | `72.5µs` | `65.7µs` | `66.0µs` | | `3.62GB/s` | `3.99GB/s` | `3.97GB/s` |
| 512 kib | `145µs` | `131µs` | `132µs` | | `3.60GB/s` | `4.00GB/s` | `3.97GB/s` |
| 1024 kib | `290µs` | `262µs` | `262µs` | | `3.62GB/s` | `4.00GB/s` | `4.00GB/s` |
### No ASM
| Size | Incremental | Full Buffer | Reset | | Incremental Rate | Full Buffer Rate | Reset Rate |
|----------|-------------|-------------|------------|-|------------------|------------------|-------------|
| 64 b | `253ns` | `254ns` | `134ns` | | `253MB/s` | `252MB/s` | `478MB/s` |
| 256 b | `553ns` | `557ns` | `441ns` | | `463MB/s` | `459MB/s` | `580MB/s` |
| 512 b | `948ns` | `953ns` | `841ns` | | `540MB/s` | `538MB/s` | `609MB/s` |
| 768 b | `1.38µs` | `1.40µs` | `1.35µs` | | `558MB/s` | `547MB/s` | `570MB/s` |
| 1 kib | `1.77µs` | `1.77µs` | `1.70µs` | | `577MB/s` | `580MB/s` | `602MB/s` |
| | | | | | | | |
| 1024 kib | `880µs` | `883µs` | `878µs` | | `596MB/s` | `595MB/s` | `598MB/s` |
The speed caps out at around 1 kib, so most rows have been elided from the presentation.
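The exact benchmark harness is not reproduced here; a minimal sketch of the pattern behind the Full Buffer and Reset columns, using Go's testing package (package name and size are illustrative):

```go
package blake3bench // hypothetical benchmark package

import (
	"testing"

	"github.com/zeebo/blake3"
)

// BenchmarkFullBuffer measures one-shot hashing throughput at a fixed size.
func BenchmarkFullBuffer(b *testing.B) {
	data := make([]byte, 8192)
	b.SetBytes(int64(len(data)))
	for i := 0; i < b.N; i++ {
		_ = blake3.Sum256(data)
	}
}

// BenchmarkReset does the same work while reusing a single Hasher.
func BenchmarkReset(b *testing.B) {
	data := make([]byte, 8192)
	b.SetBytes(int64(len(data)))
	h := blake3.New()
	out := make([]byte, 0, 32)
	for i := 0; i < b.N; i++ {
		h.Reset()
		_, _ = h.Write(data)
		out = h.Sum(out[:0])
	}
	_ = out
}
```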

165
vendor/github.com/zeebo/blake3/api.go generated vendored Normal file
View File

@ -0,0 +1,165 @@
// Package blake3 provides an SSE4.1/AVX2 accelerated BLAKE3 implementation.
package blake3
import (
"errors"
"github.com/zeebo/blake3/internal/consts"
"github.com/zeebo/blake3/internal/utils"
)
// Hasher is a hash.Hash for BLAKE3.
type Hasher struct {
size int
h hasher
}
// New returns a new Hasher that has a digest size of 32 bytes.
//
// If you need more or less output bytes than that, use the Digest method.
func New() *Hasher {
return &Hasher{
size: 32,
h: hasher{
key: consts.IV,
},
}
}
// NewKeyed returns a new Hasher that uses the 32 byte input key and has
// a digest size of 32 bytes.
//
// If you need more or less output bytes than that, use the Digest method.
func NewKeyed(key []byte) (*Hasher, error) {
if len(key) != 32 {
return nil, errors.New("invalid key size")
}
h := &Hasher{
size: 32,
h: hasher{
flags: consts.Flag_Keyed,
},
}
utils.KeyFromBytes(key, &h.h.key)
return h, nil
}
// DeriveKey derives a key based on reusable key material of any
// length, in the given context. The key will be stored in out, using
// all of its current length.
//
// Context strings must be hardcoded constants, and the recommended
// format is "[application] [commit timestamp] [purpose]", e.g.,
// "example.com 2019-12-25 16:18:03 session tokens v1".
func DeriveKey(context string, material []byte, out []byte) {
h := NewDeriveKey(context)
_, _ = h.Write(material)
_, _ = h.Digest().Read(out)
}
// NewDeriveKey returns a Hasher that is initialized with the context
// string. See DeriveKey for details. It has a digest size of 32 bytes.
//
// If you need more or less output bytes than that, use the Digest method.
func NewDeriveKey(context string) *Hasher {
// hash the context string and use that instead of IV
h := &Hasher{
size: 32,
h: hasher{
key: consts.IV,
flags: consts.Flag_DeriveKeyContext,
},
}
var buf [32]byte
_, _ = h.WriteString(context)
_, _ = h.Digest().Read(buf[:])
h.Reset()
utils.KeyFromBytes(buf[:], &h.h.key)
h.h.flags = consts.Flag_DeriveKeyMaterial
return h
}
// Write implements part of the hash.Hash interface. It never returns an error.
func (h *Hasher) Write(p []byte) (int, error) {
h.h.update(p)
return len(p), nil
}
// WriteString is like Write but specialized to strings to avoid allocations.
func (h *Hasher) WriteString(p string) (int, error) {
h.h.updateString(p)
return len(p), nil
}
// Reset implements part of the hash.Hash interface. It causes the Hasher to
// act as if it was newly created.
func (h *Hasher) Reset() {
h.h.reset()
}
// Clone returns a new Hasher with the same internal state.
//
// Modifying the resulting Hasher will not modify the original Hasher, and vice versa.
func (h *Hasher) Clone() *Hasher {
return &Hasher{size: h.size, h: h.h}
}
// Size implements part of the hash.Hash interface. It returns the number of
// bytes the hash will output in Sum.
func (h *Hasher) Size() int {
return h.size
}
// BlockSize implements part of the hash.Hash interface. It returns the most
// natural size to write to the Hasher.
func (h *Hasher) BlockSize() int {
return 64
}
// Sum implements part of the hash.Hash interface. It appends the digest of
// the Hasher to the provided buffer and returns it.
func (h *Hasher) Sum(b []byte) []byte {
if top := len(b) + h.size; top <= cap(b) && top >= len(b) {
h.h.finalize(b[len(b):top])
return b[:top]
}
tmp := make([]byte, h.size)
h.h.finalize(tmp)
return append(b, tmp...)
}
// Digest takes a snapshot of the hash state and returns an object that can
// be used to read and seek through 2^64 bytes of digest output.
func (h *Hasher) Digest() *Digest {
var d Digest
h.h.finalizeDigest(&d)
return &d
}
// Sum256 returns the first 256 bits of the unkeyed digest of the data.
func Sum256(data []byte) (sum [32]byte) {
out := Sum512(data)
copy(sum[:], out[:32])
return sum
}
// Sum512 returns the first 512 bits of the unkeyed digest of the data.
func Sum512(data []byte) (sum [64]byte) {
if len(data) <= consts.ChunkLen {
var d Digest
compressAll(&d, data, 0, consts.IV)
_, _ = d.Read(sum[:])
return sum
} else {
h := hasher{key: consts.IV}
h.update(data)
h.finalize(sum[:])
return sum
}
}
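The file above defines the public API; a minimal, hypothetical usage sketch for key derivation and keyed hashing built only on the functions shown (the context string is taken from the DeriveKey documentation, other inputs are placeholders):

```go
package main

import (
	"fmt"

	"github.com/zeebo/blake3"
)

func main() {
	// Context strings must be hardcoded constants.
	const context = "example.com 2019-12-25 16:18:03 session tokens v1"

	material := []byte("long-lived key material") // placeholder keying material
	key := make([]byte, 32)                       // DeriveKey fills the whole slice
	blake3.DeriveKey(context, material, key)
	fmt.Printf("derived key: %x\n", key)

	// Keyed hashing with the derived 32-byte key.
	h, err := blake3.NewKeyed(key)
	if err != nil {
		panic(err)
	}
	_, _ = h.Write([]byte("message")) // placeholder message
	fmt.Printf("mac: %x\n", h.Sum(nil))
}
```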

285
vendor/github.com/zeebo/blake3/blake3.go generated vendored Normal file
View File

@ -0,0 +1,285 @@
package blake3
import (
"math/bits"
"unsafe"
"github.com/zeebo/blake3/internal/alg"
"github.com/zeebo/blake3/internal/consts"
"github.com/zeebo/blake3/internal/utils"
)
//
// hasher contains state for a blake3 hash
//
type hasher struct {
len uint64
chunks uint64
flags uint32
key [8]uint32
stack cvstack
buf [8192]byte
}
func (a *hasher) reset() {
a.len = 0
a.chunks = 0
a.stack.occ = 0
a.stack.lvls = [8]uint8{}
a.stack.bufn = 0
}
func (a *hasher) update(buf []byte) {
// relies on the first two words of a string being the same as a slice
a.updateString(*(*string)(unsafe.Pointer(&buf)))
}
func (a *hasher) updateString(buf string) {
var input *[8192]byte
for len(buf) > 0 {
if a.len == 0 && len(buf) > 8192 {
// relies on the data pointer being the first word in the string header
input = (*[8192]byte)(*(*unsafe.Pointer)(unsafe.Pointer(&buf)))
buf = buf[8192:]
} else if a.len < 8192 {
n := copy(a.buf[a.len:], buf)
a.len += uint64(n)
buf = buf[n:]
continue
} else {
input = &a.buf
}
a.consume(input)
a.len = 0
a.chunks += 8
}
}
func (a *hasher) consume(input *[8192]byte) {
var out chainVector
var chain [8]uint32
alg.HashF(input, 8192, a.chunks, a.flags, &a.key, &out, &chain)
a.stack.pushN(0, &out, 8, a.flags, &a.key)
}
func (a *hasher) finalize(p []byte) {
var d Digest
a.finalizeDigest(&d)
_, _ = d.Read(p)
}
func (a *hasher) finalizeDigest(d *Digest) {
if a.chunks == 0 && a.len <= consts.ChunkLen {
compressAll(d, a.buf[:a.len], a.flags, a.key)
return
}
d.chain = a.key
d.flags = a.flags | consts.Flag_ChunkEnd
if a.len > 64 {
var buf chainVector
alg.HashF(&a.buf, a.len, a.chunks, a.flags, &a.key, &buf, &d.chain)
if a.len > consts.ChunkLen {
complete := (a.len - 1) / consts.ChunkLen
a.stack.pushN(0, &buf, int(complete), a.flags, &a.key)
a.chunks += complete
a.len = uint64(copy(a.buf[:], a.buf[complete*consts.ChunkLen:a.len]))
}
}
if a.len <= 64 {
d.flags |= consts.Flag_ChunkStart
}
d.counter = a.chunks
d.blen = uint32(a.len) % 64
base := a.len / 64 * 64
if a.len > 0 && d.blen == 0 {
d.blen = 64
base -= 64
}
if consts.OptimizeLittleEndian {
copy((*[64]byte)(unsafe.Pointer(&d.block[0]))[:], a.buf[base:a.len])
} else {
var tmp [64]byte
copy(tmp[:], a.buf[base:a.len])
utils.BytesToWords(&tmp, &d.block)
}
for a.stack.bufn > 0 {
a.stack.flush(a.flags, &a.key)
}
var tmp [16]uint32
for occ := a.stack.occ; occ != 0; occ &= occ - 1 {
col := uint(bits.TrailingZeros64(occ)) % 64
alg.Compress(&d.chain, &d.block, d.counter, d.blen, d.flags, &tmp)
*(*[8]uint32)(unsafe.Pointer(&d.block[0])) = a.stack.stack[col]
*(*[8]uint32)(unsafe.Pointer(&d.block[8])) = *(*[8]uint32)(unsafe.Pointer(&tmp[0]))
if occ == a.stack.occ {
d.chain = a.key
d.counter = 0
d.blen = consts.BlockLen
d.flags = a.flags | consts.Flag_Parent
}
}
d.flags |= consts.Flag_Root
}
//
// chain value stack
//
type chainVector = [64]uint32
type cvstack struct {
occ uint64 // which levels in stack are occupied
lvls [8]uint8 // what level the buf input was in
bufn int // how many pairs are loaded into buf
buf [2]chainVector
stack [64][8]uint32
}
func (a *cvstack) pushN(l uint8, cv *chainVector, n int, flags uint32, key *[8]uint32) {
for i := 0; i < n; i++ {
a.pushL(l, cv, i)
for a.bufn == 8 {
a.flush(flags, key)
}
}
}
func (a *cvstack) pushL(l uint8, cv *chainVector, n int) {
bit := uint64(1) << (l & 63)
if a.occ&bit == 0 {
readChain(cv, n, &a.stack[l&63])
a.occ ^= bit
return
}
a.lvls[a.bufn&7] = l
writeChain(&a.stack[l&63], &a.buf[0], a.bufn)
copyChain(cv, n, &a.buf[1], a.bufn)
a.bufn++
a.occ ^= bit
}
func (a *cvstack) flush(flags uint32, key *[8]uint32) {
var out chainVector
alg.HashP(&a.buf[0], &a.buf[1], flags|consts.Flag_Parent, key, &out, a.bufn)
bufn, lvls := a.bufn, a.lvls
a.bufn, a.lvls = 0, [8]uint8{}
for i := 0; i < bufn; i++ {
a.pushL(lvls[i]+1, &out, i)
}
}
//
// helpers to deal with reading/writing transposed values
//
func copyChain(in *chainVector, icol int, out *chainVector, ocol int) {
type u = uintptr
type p = unsafe.Pointer
type a = *uint32
i := p(u(p(in)) + u(icol*4))
o := p(u(p(out)) + u(ocol*4))
*a(p(u(o) + 0*32)) = *a(p(u(i) + 0*32))
*a(p(u(o) + 1*32)) = *a(p(u(i) + 1*32))
*a(p(u(o) + 2*32)) = *a(p(u(i) + 2*32))
*a(p(u(o) + 3*32)) = *a(p(u(i) + 3*32))
*a(p(u(o) + 4*32)) = *a(p(u(i) + 4*32))
*a(p(u(o) + 5*32)) = *a(p(u(i) + 5*32))
*a(p(u(o) + 6*32)) = *a(p(u(i) + 6*32))
*a(p(u(o) + 7*32)) = *a(p(u(i) + 7*32))
}
func readChain(in *chainVector, col int, out *[8]uint32) {
type u = uintptr
type p = unsafe.Pointer
type a = *uint32
i := p(u(p(in)) + u(col*4))
out[0] = *a(p(u(i) + 0*32))
out[1] = *a(p(u(i) + 1*32))
out[2] = *a(p(u(i) + 2*32))
out[3] = *a(p(u(i) + 3*32))
out[4] = *a(p(u(i) + 4*32))
out[5] = *a(p(u(i) + 5*32))
out[6] = *a(p(u(i) + 6*32))
out[7] = *a(p(u(i) + 7*32))
}
func writeChain(in *[8]uint32, out *chainVector, col int) {
type u = uintptr
type p = unsafe.Pointer
type a = *uint32
o := p(u(p(out)) + u(col*4))
*a(p(u(o) + 0*32)) = in[0]
*a(p(u(o) + 1*32)) = in[1]
*a(p(u(o) + 2*32)) = in[2]
*a(p(u(o) + 3*32)) = in[3]
*a(p(u(o) + 4*32)) = in[4]
*a(p(u(o) + 5*32)) = in[5]
*a(p(u(o) + 6*32)) = in[6]
*a(p(u(o) + 7*32)) = in[7]
}
//
// compress <= chunkLen bytes in one shot
//
func compressAll(d *Digest, in []byte, flags uint32, key [8]uint32) {
var compressed [16]uint32
d.chain = key
d.flags = flags | consts.Flag_ChunkStart
for len(in) > 64 {
buf := (*[64]byte)(unsafe.Pointer(&in[0]))
var block *[16]uint32
if consts.OptimizeLittleEndian {
block = (*[16]uint32)(unsafe.Pointer(buf))
} else {
block = &d.block
utils.BytesToWords(buf, block)
}
alg.Compress(&d.chain, block, 0, consts.BlockLen, d.flags, &compressed)
d.chain = *(*[8]uint32)(unsafe.Pointer(&compressed[0]))
d.flags &^= consts.Flag_ChunkStart
in = in[64:]
}
if consts.OptimizeLittleEndian {
copy((*[64]byte)(unsafe.Pointer(&d.block[0]))[:], in)
} else {
var tmp [64]byte
copy(tmp[:], in)
utils.BytesToWords(&tmp, &d.block)
}
d.blen = uint32(len(in))
d.flags |= consts.Flag_ChunkEnd | consts.Flag_Root
}

100
vendor/github.com/zeebo/blake3/digest.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
package blake3
import (
"fmt"
"io"
"unsafe"
"github.com/zeebo/blake3/internal/alg"
"github.com/zeebo/blake3/internal/consts"
"github.com/zeebo/blake3/internal/utils"
)
// Digest captures the state of a Hasher allowing reading and seeking through
// the output stream.
type Digest struct {
counter uint64
chain [8]uint32
block [16]uint32
blen uint32
flags uint32
buf [16]uint32
bufn int
}
// Read reads data from the hasher into p. It always fills the entire buffer and
// never errors. The stream will wrap around when reading past 2^64 bytes.
func (d *Digest) Read(p []byte) (n int, err error) {
n = len(p)
if d.bufn > 0 {
n := d.slowCopy(p)
p = p[n:]
d.bufn -= n
}
for len(p) >= 64 {
d.fillBuf()
if consts.OptimizeLittleEndian {
*(*[64]byte)(unsafe.Pointer(&p[0])) = *(*[64]byte)(unsafe.Pointer(&d.buf[0]))
} else {
utils.WordsToBytes(&d.buf, p)
}
p = p[64:]
d.bufn = 0
}
if len(p) == 0 {
return n, nil
}
d.fillBuf()
d.bufn -= d.slowCopy(p)
return n, nil
}
// Seek sets the position to the provided location. Only SeekStart and
// SeekCurrent are allowed.
func (d *Digest) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
case io.SeekEnd:
return 0, fmt.Errorf("seek from end not supported")
case io.SeekCurrent:
offset += int64(consts.BlockLen*d.counter) - int64(d.bufn)
default:
return 0, fmt.Errorf("invalid whence: %d", whence)
}
if offset < 0 {
return 0, fmt.Errorf("seek before start")
}
d.setPosition(uint64(offset))
return offset, nil
}
func (d *Digest) setPosition(pos uint64) {
d.counter = pos / consts.BlockLen
d.fillBuf()
d.bufn -= int(pos % consts.BlockLen)
}
func (d *Digest) slowCopy(p []byte) (n int) {
off := uint(consts.BlockLen-d.bufn) % consts.BlockLen
if consts.OptimizeLittleEndian {
n = copy(p, (*[consts.BlockLen]byte)(unsafe.Pointer(&d.buf[0]))[off:])
} else {
var tmp [consts.BlockLen]byte
utils.WordsToBytes(&d.buf, tmp[:])
n = copy(p, tmp[off:])
}
return n
}
func (d *Digest) fillBuf() {
alg.Compress(&d.chain, &d.block, d.counter, d.blen, d.flags, &d.buf)
d.counter++
d.bufn = consts.BlockLen
}
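The Digest type above turns the hash into an extendable-output stream; a minimal, hypothetical sketch of reading and seeking through that stream (the input text is a placeholder):

```go
package main

import (
	"fmt"
	"io"

	"github.com/zeebo/blake3"
)

func main() {
	h := blake3.New()
	_, _ = h.Write([]byte("extendable output example")) // placeholder input

	d := h.Digest()

	// Read an arbitrary amount of digest output; here, 64 bytes.
	out := make([]byte, 64)
	_, _ = d.Read(out)
	fmt.Printf("first 64 bytes: %x\n", out)

	// Seek back to byte offset 32 and read again; this yields out[32:64].
	if _, err := d.Seek(32, io.SeekStart); err != nil {
		panic(err)
	}
	tail := make([]byte, 32)
	_, _ = d.Read(tail)
	fmt.Printf("bytes 32..63:   %x\n", tail)
}
```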

18
vendor/github.com/zeebo/blake3/internal/alg/alg.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
package alg
import (
"github.com/zeebo/blake3/internal/alg/compress"
"github.com/zeebo/blake3/internal/alg/hash"
)
func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32) {
hash.HashF(input, length, counter, flags, key, out, chain)
}
func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int) {
hash.HashP(left, right, flags, key, out, n)
}
func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32) {
compress.Compress(chain, block, counter, blen, flags, out)
}

View File

@ -0,0 +1,15 @@
package compress
import (
"github.com/zeebo/blake3/internal/alg/compress/compress_pure"
"github.com/zeebo/blake3/internal/alg/compress/compress_sse41"
"github.com/zeebo/blake3/internal/consts"
)
func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32) {
if consts.HasSSE41 {
compress_sse41.Compress(chain, block, counter, blen, flags, out)
} else {
compress_pure.Compress(chain, block, counter, blen, flags, out)
}
}

View File

@ -0,0 +1,135 @@
package compress_pure
import (
"math/bits"
"github.com/zeebo/blake3/internal/consts"
)
func Compress(
chain *[8]uint32,
block *[16]uint32,
counter uint64,
blen uint32,
flags uint32,
out *[16]uint32,
) {
*out = [16]uint32{
chain[0], chain[1], chain[2], chain[3],
chain[4], chain[5], chain[6], chain[7],
consts.IV0, consts.IV1, consts.IV2, consts.IV3,
uint32(counter), uint32(counter >> 32), blen, flags,
}
rcompress(out, block)
}
func g(a, b, c, d, mx, my uint32) (uint32, uint32, uint32, uint32) {
a += b + mx
d = bits.RotateLeft32(d^a, -16)
c += d
b = bits.RotateLeft32(b^c, -12)
a += b + my
d = bits.RotateLeft32(d^a, -8)
c += d
b = bits.RotateLeft32(b^c, -7)
return a, b, c, d
}
func rcompress(s *[16]uint32, m *[16]uint32) {
const (
a = 10
b = 11
c = 12
d = 13
e = 14
f = 15
)
s0, s1, s2, s3 := s[0+0], s[0+1], s[0+2], s[0+3]
s4, s5, s6, s7 := s[0+4], s[0+5], s[0+6], s[0+7]
s8, s9, sa, sb := s[8+0], s[8+1], s[8+2], s[8+3]
sc, sd, se, sf := s[8+4], s[8+5], s[8+6], s[8+7]
s0, s4, s8, sc = g(s0, s4, s8, sc, m[0], m[1])
s1, s5, s9, sd = g(s1, s5, s9, sd, m[2], m[3])
s2, s6, sa, se = g(s2, s6, sa, se, m[4], m[5])
s3, s7, sb, sf = g(s3, s7, sb, sf, m[6], m[7])
s0, s5, sa, sf = g(s0, s5, sa, sf, m[8], m[9])
s1, s6, sb, sc = g(s1, s6, sb, sc, m[a], m[b])
s2, s7, s8, sd = g(s2, s7, s8, sd, m[c], m[d])
s3, s4, s9, se = g(s3, s4, s9, se, m[e], m[f])
s0, s4, s8, sc = g(s0, s4, s8, sc, m[2], m[6])
s1, s5, s9, sd = g(s1, s5, s9, sd, m[3], m[a])
s2, s6, sa, se = g(s2, s6, sa, se, m[7], m[0])
s3, s7, sb, sf = g(s3, s7, sb, sf, m[4], m[d])
s0, s5, sa, sf = g(s0, s5, sa, sf, m[1], m[b])
s1, s6, sb, sc = g(s1, s6, sb, sc, m[c], m[5])
s2, s7, s8, sd = g(s2, s7, s8, sd, m[9], m[e])
s3, s4, s9, se = g(s3, s4, s9, se, m[f], m[8])
s0, s4, s8, sc = g(s0, s4, s8, sc, m[3], m[4])
s1, s5, s9, sd = g(s1, s5, s9, sd, m[a], m[c])
s2, s6, sa, se = g(s2, s6, sa, se, m[d], m[2])
s3, s7, sb, sf = g(s3, s7, sb, sf, m[7], m[e])
s0, s5, sa, sf = g(s0, s5, sa, sf, m[6], m[5])
s1, s6, sb, sc = g(s1, s6, sb, sc, m[9], m[0])
s2, s7, s8, sd = g(s2, s7, s8, sd, m[b], m[f])
s3, s4, s9, se = g(s3, s4, s9, se, m[8], m[1])
s0, s4, s8, sc = g(s0, s4, s8, sc, m[a], m[7])
s1, s5, s9, sd = g(s1, s5, s9, sd, m[c], m[9])
s2, s6, sa, se = g(s2, s6, sa, se, m[e], m[3])
s3, s7, sb, sf = g(s3, s7, sb, sf, m[d], m[f])
s0, s5, sa, sf = g(s0, s5, sa, sf, m[4], m[0])
s1, s6, sb, sc = g(s1, s6, sb, sc, m[b], m[2])
s2, s7, s8, sd = g(s2, s7, s8, sd, m[5], m[8])
s3, s4, s9, se = g(s3, s4, s9, se, m[1], m[6])
s0, s4, s8, sc = g(s0, s4, s8, sc, m[c], m[d])
s1, s5, s9, sd = g(s1, s5, s9, sd, m[9], m[b])
s2, s6, sa, se = g(s2, s6, sa, se, m[f], m[a])
s3, s7, sb, sf = g(s3, s7, sb, sf, m[e], m[8])
s0, s5, sa, sf = g(s0, s5, sa, sf, m[7], m[2])
s1, s6, sb, sc = g(s1, s6, sb, sc, m[5], m[3])
s2, s7, s8, sd = g(s2, s7, s8, sd, m[0], m[1])
s3, s4, s9, se = g(s3, s4, s9, se, m[6], m[4])
s0, s4, s8, sc = g(s0, s4, s8, sc, m[9], m[e])
s1, s5, s9, sd = g(s1, s5, s9, sd, m[b], m[5])
s2, s6, sa, se = g(s2, s6, sa, se, m[8], m[c])
s3, s7, sb, sf = g(s3, s7, sb, sf, m[f], m[1])
s0, s5, sa, sf = g(s0, s5, sa, sf, m[d], m[3])
s1, s6, sb, sc = g(s1, s6, sb, sc, m[0], m[a])
s2, s7, s8, sd = g(s2, s7, s8, sd, m[2], m[6])
s3, s4, s9, se = g(s3, s4, s9, se, m[4], m[7])
s0, s4, s8, sc = g(s0, s4, s8, sc, m[b], m[f])
s1, s5, s9, sd = g(s1, s5, s9, sd, m[5], m[0])
s2, s6, sa, se = g(s2, s6, sa, se, m[1], m[9])
s3, s7, sb, sf = g(s3, s7, sb, sf, m[8], m[6])
s0, s5, sa, sf = g(s0, s5, sa, sf, m[e], m[a])
s1, s6, sb, sc = g(s1, s6, sb, sc, m[2], m[c])
s2, s7, s8, sd = g(s2, s7, s8, sd, m[3], m[4])
s3, s4, s9, se = g(s3, s4, s9, se, m[7], m[d])
s[8+0] = s8 ^ s[0]
s[8+1] = s9 ^ s[1]
s[8+2] = sa ^ s[2]
s[8+3] = sb ^ s[3]
s[8+4] = sc ^ s[4]
s[8+5] = sd ^ s[5]
s[8+6] = se ^ s[6]
s[8+7] = sf ^ s[7]
s[0] = s0 ^ s8
s[1] = s1 ^ s9
s[2] = s2 ^ sa
s[3] = s3 ^ sb
s[4] = s4 ^ sc
s[5] = s5 ^ sd
s[6] = s6 ^ se
s[7] = s7 ^ sf
}

View File

@ -0,0 +1,560 @@
// Code generated by command: go run compress.go. DO NOT EDIT.
#include "textflag.h"
DATA iv<>+0(SB)/4, $0x6a09e667
DATA iv<>+4(SB)/4, $0xbb67ae85
DATA iv<>+8(SB)/4, $0x3c6ef372
DATA iv<>+12(SB)/4, $0xa54ff53a
DATA iv<>+16(SB)/4, $0x510e527f
DATA iv<>+20(SB)/4, $0x9b05688c
DATA iv<>+24(SB)/4, $0x1f83d9ab
DATA iv<>+28(SB)/4, $0x5be0cd19
GLOBL iv<>(SB), RODATA|NOPTR, $32
DATA rot16_shuf<>+0(SB)/1, $0x02
DATA rot16_shuf<>+1(SB)/1, $0x03
DATA rot16_shuf<>+2(SB)/1, $0x00
DATA rot16_shuf<>+3(SB)/1, $0x01
DATA rot16_shuf<>+4(SB)/1, $0x06
DATA rot16_shuf<>+5(SB)/1, $0x07
DATA rot16_shuf<>+6(SB)/1, $0x04
DATA rot16_shuf<>+7(SB)/1, $0x05
DATA rot16_shuf<>+8(SB)/1, $0x0a
DATA rot16_shuf<>+9(SB)/1, $0x0b
DATA rot16_shuf<>+10(SB)/1, $0x08
DATA rot16_shuf<>+11(SB)/1, $0x09
DATA rot16_shuf<>+12(SB)/1, $0x0e
DATA rot16_shuf<>+13(SB)/1, $0x0f
DATA rot16_shuf<>+14(SB)/1, $0x0c
DATA rot16_shuf<>+15(SB)/1, $0x0d
DATA rot16_shuf<>+16(SB)/1, $0x12
DATA rot16_shuf<>+17(SB)/1, $0x13
DATA rot16_shuf<>+18(SB)/1, $0x10
DATA rot16_shuf<>+19(SB)/1, $0x11
DATA rot16_shuf<>+20(SB)/1, $0x16
DATA rot16_shuf<>+21(SB)/1, $0x17
DATA rot16_shuf<>+22(SB)/1, $0x14
DATA rot16_shuf<>+23(SB)/1, $0x15
DATA rot16_shuf<>+24(SB)/1, $0x1a
DATA rot16_shuf<>+25(SB)/1, $0x1b
DATA rot16_shuf<>+26(SB)/1, $0x18
DATA rot16_shuf<>+27(SB)/1, $0x19
DATA rot16_shuf<>+28(SB)/1, $0x1e
DATA rot16_shuf<>+29(SB)/1, $0x1f
DATA rot16_shuf<>+30(SB)/1, $0x1c
DATA rot16_shuf<>+31(SB)/1, $0x1d
GLOBL rot16_shuf<>(SB), RODATA|NOPTR, $32
DATA rot8_shuf<>+0(SB)/1, $0x01
DATA rot8_shuf<>+1(SB)/1, $0x02
DATA rot8_shuf<>+2(SB)/1, $0x03
DATA rot8_shuf<>+3(SB)/1, $0x00
DATA rot8_shuf<>+4(SB)/1, $0x05
DATA rot8_shuf<>+5(SB)/1, $0x06
DATA rot8_shuf<>+6(SB)/1, $0x07
DATA rot8_shuf<>+7(SB)/1, $0x04
DATA rot8_shuf<>+8(SB)/1, $0x09
DATA rot8_shuf<>+9(SB)/1, $0x0a
DATA rot8_shuf<>+10(SB)/1, $0x0b
DATA rot8_shuf<>+11(SB)/1, $0x08
DATA rot8_shuf<>+12(SB)/1, $0x0d
DATA rot8_shuf<>+13(SB)/1, $0x0e
DATA rot8_shuf<>+14(SB)/1, $0x0f
DATA rot8_shuf<>+15(SB)/1, $0x0c
DATA rot8_shuf<>+16(SB)/1, $0x11
DATA rot8_shuf<>+17(SB)/1, $0x12
DATA rot8_shuf<>+18(SB)/1, $0x13
DATA rot8_shuf<>+19(SB)/1, $0x10
DATA rot8_shuf<>+20(SB)/1, $0x15
DATA rot8_shuf<>+21(SB)/1, $0x16
DATA rot8_shuf<>+22(SB)/1, $0x17
DATA rot8_shuf<>+23(SB)/1, $0x14
DATA rot8_shuf<>+24(SB)/1, $0x19
DATA rot8_shuf<>+25(SB)/1, $0x1a
DATA rot8_shuf<>+26(SB)/1, $0x1b
DATA rot8_shuf<>+27(SB)/1, $0x18
DATA rot8_shuf<>+28(SB)/1, $0x1d
DATA rot8_shuf<>+29(SB)/1, $0x1e
DATA rot8_shuf<>+30(SB)/1, $0x1f
DATA rot8_shuf<>+31(SB)/1, $0x1c
GLOBL rot8_shuf<>(SB), RODATA|NOPTR, $32
// func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32)
// Requires: SSE, SSE2, SSE4.1, SSSE3
TEXT ·Compress(SB), NOSPLIT, $0-40
MOVQ chain+0(FP), AX
MOVQ block+8(FP), CX
MOVQ counter+16(FP), DX
MOVL blen+24(FP), BX
MOVL flags+28(FP), SI
MOVQ out+32(FP), DI
MOVUPS (AX), X0
MOVUPS 16(AX), X1
MOVUPS iv<>+0(SB), X2
PINSRD $0x00, DX, X3
SHRQ $0x20, DX
PINSRD $0x01, DX, X3
PINSRD $0x02, BX, X3
PINSRD $0x03, SI, X3
MOVUPS (CX), X4
MOVUPS 16(CX), X5
MOVUPS 32(CX), X6
MOVUPS 48(CX), X7
MOVUPS rot16_shuf<>+0(SB), X8
MOVUPS rot8_shuf<>+0(SB), X9
// round 1
MOVAPS X4, X10
SHUFPS $0x88, X5, X10
PADDD X10, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X11
PSRLL $0x0c, X1
PSLLL $0x14, X11
POR X11, X1
MOVAPS X4, X4
SHUFPS $0xdd, X5, X4
PADDD X4, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x07, X1
PSLLL $0x19, X5
POR X5, X1
PSHUFD $0x93, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x39, X2, X2
MOVAPS X6, X5
SHUFPS $0x88, X7, X5
SHUFPS $0x93, X5, X5
PADDD X5, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X11
PSRLL $0x0c, X1
PSLLL $0x14, X11
POR X11, X1
MOVAPS X6, X6
SHUFPS $0xdd, X7, X6
SHUFPS $0x93, X6, X6
PADDD X6, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X7
PSRLL $0x07, X1
PSLLL $0x19, X7
POR X7, X1
PSHUFD $0x39, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x93, X2, X2
// round 2
MOVAPS X10, X7
SHUFPS $0xd6, X4, X7
SHUFPS $0x39, X7, X7
PADDD X7, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X11
PSRLL $0x0c, X1
PSLLL $0x14, X11
POR X11, X1
MOVAPS X5, X11
SHUFPS $0xfa, X6, X11
PSHUFD $0x0f, X10, X10
PBLENDW $0x33, X10, X11
PADDD X11, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X10
PSRLL $0x07, X1
PSLLL $0x19, X10
POR X10, X1
PSHUFD $0x93, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x39, X2, X2
MOVAPS X6, X12
PUNPCKLLQ X4, X12
PBLENDW $0xc0, X5, X12
SHUFPS $0xb4, X12, X12
PADDD X12, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X10
PSRLL $0x0c, X1
PSLLL $0x14, X10
POR X10, X1
MOVAPS X4, X10
PUNPCKHLQ X6, X10
MOVAPS X5, X4
PUNPCKLLQ X10, X4
SHUFPS $0x1e, X4, X4
PADDD X4, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x07, X1
PSLLL $0x19, X5
POR X5, X1
PSHUFD $0x39, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x93, X2, X2
// round 3
MOVAPS X7, X5
SHUFPS $0xd6, X11, X5
SHUFPS $0x39, X5, X5
PADDD X5, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X6
PSRLL $0x0c, X1
PSLLL $0x14, X6
POR X6, X1
MOVAPS X12, X6
SHUFPS $0xfa, X4, X6
PSHUFD $0x0f, X7, X7
PBLENDW $0x33, X7, X6
PADDD X6, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X7
PSRLL $0x07, X1
PSLLL $0x19, X7
POR X7, X1
PSHUFD $0x93, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x39, X2, X2
MOVAPS X4, X10
PUNPCKLLQ X11, X10
PBLENDW $0xc0, X12, X10
SHUFPS $0xb4, X10, X10
PADDD X10, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X7
PSRLL $0x0c, X1
PSLLL $0x14, X7
POR X7, X1
MOVAPS X11, X7
PUNPCKHLQ X4, X7
MOVAPS X12, X4
PUNPCKLLQ X7, X4
SHUFPS $0x1e, X4, X4
PADDD X4, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X7
PSRLL $0x07, X1
PSLLL $0x19, X7
POR X7, X1
PSHUFD $0x39, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x93, X2, X2
// round 4
MOVAPS X5, X7
SHUFPS $0xd6, X6, X7
SHUFPS $0x39, X7, X7
PADDD X7, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X11
PSRLL $0x0c, X1
PSLLL $0x14, X11
POR X11, X1
MOVAPS X10, X11
SHUFPS $0xfa, X4, X11
PSHUFD $0x0f, X5, X5
PBLENDW $0x33, X5, X11
PADDD X11, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x07, X1
PSLLL $0x19, X5
POR X5, X1
PSHUFD $0x93, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x39, X2, X2
MOVAPS X4, X12
PUNPCKLLQ X6, X12
PBLENDW $0xc0, X10, X12
SHUFPS $0xb4, X12, X12
PADDD X12, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x0c, X1
PSLLL $0x14, X5
POR X5, X1
MOVAPS X6, X5
PUNPCKHLQ X4, X5
MOVAPS X10, X4
PUNPCKLLQ X5, X4
SHUFPS $0x1e, X4, X4
PADDD X4, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x07, X1
PSLLL $0x19, X5
POR X5, X1
PSHUFD $0x39, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x93, X2, X2
// round 5
MOVAPS X7, X5
SHUFPS $0xd6, X11, X5
SHUFPS $0x39, X5, X5
PADDD X5, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X6
PSRLL $0x0c, X1
PSLLL $0x14, X6
POR X6, X1
MOVAPS X12, X6
SHUFPS $0xfa, X4, X6
PSHUFD $0x0f, X7, X7
PBLENDW $0x33, X7, X6
PADDD X6, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X7
PSRLL $0x07, X1
PSLLL $0x19, X7
POR X7, X1
PSHUFD $0x93, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x39, X2, X2
MOVAPS X4, X10
PUNPCKLLQ X11, X10
PBLENDW $0xc0, X12, X10
SHUFPS $0xb4, X10, X10
PADDD X10, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X7
PSRLL $0x0c, X1
PSLLL $0x14, X7
POR X7, X1
MOVAPS X11, X7
PUNPCKHLQ X4, X7
MOVAPS X12, X4
PUNPCKLLQ X7, X4
SHUFPS $0x1e, X4, X4
PADDD X4, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X7
PSRLL $0x07, X1
PSLLL $0x19, X7
POR X7, X1
PSHUFD $0x39, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x93, X2, X2
// round 6
MOVAPS X5, X7
SHUFPS $0xd6, X6, X7
SHUFPS $0x39, X7, X7
PADDD X7, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X11
PSRLL $0x0c, X1
PSLLL $0x14, X11
POR X11, X1
MOVAPS X10, X11
SHUFPS $0xfa, X4, X11
PSHUFD $0x0f, X5, X5
PBLENDW $0x33, X5, X11
PADDD X11, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x07, X1
PSLLL $0x19, X5
POR X5, X1
PSHUFD $0x93, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x39, X2, X2
MOVAPS X4, X12
PUNPCKLLQ X6, X12
PBLENDW $0xc0, X10, X12
SHUFPS $0xb4, X12, X12
PADDD X12, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x0c, X1
PSLLL $0x14, X5
POR X5, X1
MOVAPS X6, X5
PUNPCKHLQ X4, X5
MOVAPS X10, X4
PUNPCKLLQ X5, X4
SHUFPS $0x1e, X4, X4
PADDD X4, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x07, X1
PSLLL $0x19, X5
POR X5, X1
PSHUFD $0x39, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x93, X2, X2
// round 7
MOVAPS X7, X5
SHUFPS $0xd6, X11, X5
SHUFPS $0x39, X5, X5
PADDD X5, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x0c, X1
PSLLL $0x14, X5
POR X5, X1
MOVAPS X12, X5
SHUFPS $0xfa, X4, X5
PSHUFD $0x0f, X7, X6
PBLENDW $0x33, X6, X5
PADDD X5, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x07, X1
PSLLL $0x19, X5
POR X5, X1
PSHUFD $0x93, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x39, X2, X2
MOVAPS X4, X5
PUNPCKLLQ X11, X5
PBLENDW $0xc0, X12, X5
SHUFPS $0xb4, X5, X5
PADDD X5, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X8, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X5
PSRLL $0x0c, X1
PSLLL $0x14, X5
POR X5, X1
MOVAPS X11, X6
PUNPCKHLQ X4, X6
MOVAPS X12, X4
PUNPCKLLQ X6, X4
SHUFPS $0x1e, X4, X4
PADDD X4, X0
PADDD X1, X0
PXOR X0, X3
PSHUFB X9, X3
PADDD X3, X2
PXOR X2, X1
MOVAPS X1, X4
PSRLL $0x07, X1
PSLLL $0x19, X4
POR X4, X1
PSHUFD $0x39, X0, X0
PSHUFD $0x4e, X3, X3
PSHUFD $0x93, X2, X2
// finalize
PXOR X2, X0
PXOR X3, X1
MOVUPS (AX), X4
PXOR X4, X2
MOVUPS 16(AX), X4
PXOR X4, X3
MOVUPS X0, (DI)
MOVUPS X1, 16(DI)
MOVUPS X2, 32(DI)
MOVUPS X3, 48(DI)
RET

View File

@ -0,0 +1,10 @@
//go:build !amd64
// +build !amd64
package compress_sse41
import "github.com/zeebo/blake3/internal/alg/compress/compress_pure"
func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32) {
compress_pure.Compress(chain, block, counter, blen, flags, out)
}

View File

@ -0,0 +1,7 @@
//go:build amd64
// +build amd64
package compress_sse41
//go:noescape
func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32)

View File

@ -0,0 +1,23 @@
package hash
import (
"github.com/zeebo/blake3/internal/alg/hash/hash_avx2"
"github.com/zeebo/blake3/internal/alg/hash/hash_pure"
"github.com/zeebo/blake3/internal/consts"
)
func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32) {
if consts.HasAVX2 && length > 2*consts.ChunkLen {
hash_avx2.HashF(input, length, counter, flags, key, out, chain)
} else {
hash_pure.HashF(input, length, counter, flags, key, out, chain)
}
}
func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int) {
if consts.HasAVX2 && n >= 2 {
hash_avx2.HashP(left, right, flags, key, out, n)
} else {
hash_pure.HashP(left, right, flags, key, out, n)
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,14 @@
//go:build !amd64
// +build !amd64
package hash_avx2
import "github.com/zeebo/blake3/internal/alg/hash/hash_pure"
func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32) {
hash_pure.HashF(input, length, counter, flags, key, out, chain)
}
func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int) {
hash_pure.HashP(left, right, flags, key, out, n)
}

View File

@ -0,0 +1,10 @@
//go:build amd64
// +build amd64
package hash_avx2
//go:noescape
func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32)
//go:noescape
func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int)

View File

@ -0,0 +1,56 @@
package hash_pure
import (
"unsafe"
"github.com/zeebo/blake3/internal/alg/compress"
"github.com/zeebo/blake3/internal/consts"
"github.com/zeebo/blake3/internal/utils"
)
func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32) {
var tmp [16]uint32
for i := uint64(0); consts.ChunkLen*i < length && i < 8; i++ {
bchain := *key
bflags := flags | consts.Flag_ChunkStart
start := consts.ChunkLen * i
for n := uint64(0); n < 16; n++ {
if n == 15 {
bflags |= consts.Flag_ChunkEnd
}
if start+64*n >= length {
break
}
if start+64+64*n >= length {
*chain = bchain
}
var blockPtr *[16]uint32
if consts.OptimizeLittleEndian {
blockPtr = (*[16]uint32)(unsafe.Pointer(&input[consts.ChunkLen*i+consts.BlockLen*n]))
} else {
var block [16]uint32
utils.BytesToWords((*[64]uint8)(unsafe.Pointer(&input[consts.ChunkLen*i+consts.BlockLen*n])), &block)
blockPtr = &block
}
compress.Compress(&bchain, blockPtr, counter, consts.BlockLen, bflags, &tmp)
bchain = *(*[8]uint32)(unsafe.Pointer(&tmp[0]))
bflags = flags
}
out[i+0] = bchain[0]
out[i+8] = bchain[1]
out[i+16] = bchain[2]
out[i+24] = bchain[3]
out[i+32] = bchain[4]
out[i+40] = bchain[5]
out[i+48] = bchain[6]
out[i+56] = bchain[7]
counter++
}
}

View File

@ -0,0 +1,38 @@
package hash_pure
import "github.com/zeebo/blake3/internal/alg/compress"
func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int) {
var tmp [16]uint32
var block [16]uint32
for i := 0; i < n && i < 8; i++ {
block[0] = left[i+0]
block[1] = left[i+8]
block[2] = left[i+16]
block[3] = left[i+24]
block[4] = left[i+32]
block[5] = left[i+40]
block[6] = left[i+48]
block[7] = left[i+56]
block[8] = right[i+0]
block[9] = right[i+8]
block[10] = right[i+16]
block[11] = right[i+24]
block[12] = right[i+32]
block[13] = right[i+40]
block[14] = right[i+48]
block[15] = right[i+56]
compress.Compress(key, &block, 0, 64, flags, &tmp)
out[i+0] = tmp[0]
out[i+8] = tmp[1]
out[i+16] = tmp[2]
out[i+24] = tmp[3]
out[i+32] = tmp[4]
out[i+40] = tmp[5]
out[i+48] = tmp[6]
out[i+56] = tmp[7]
}
}

View File

@ -0,0 +1,29 @@
package consts
var IV = [...]uint32{IV0, IV1, IV2, IV3, IV4, IV5, IV6, IV7}
const (
IV0 = 0x6A09E667
IV1 = 0xBB67AE85
IV2 = 0x3C6EF372
IV3 = 0xA54FF53A
IV4 = 0x510E527F
IV5 = 0x9B05688C
IV6 = 0x1F83D9AB
IV7 = 0x5BE0CD19
)
const (
Flag_ChunkStart uint32 = 1 << 0
Flag_ChunkEnd uint32 = 1 << 1
Flag_Parent uint32 = 1 << 2
Flag_Root uint32 = 1 << 3
Flag_Keyed uint32 = 1 << 4
Flag_DeriveKeyContext uint32 = 1 << 5
Flag_DeriveKeyMaterial uint32 = 1 << 6
)
const (
BlockLen = 64
ChunkLen = 1024
)

17
vendor/github.com/zeebo/blake3/internal/consts/cpu.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package consts
import (
"os"
"github.com/klauspost/cpuid/v2"
)
var (
HasAVX2 = cpuid.CPU.Has(cpuid.AVX2) &&
os.Getenv("BLAKE3_DISABLE_AVX2") == "" &&
os.Getenv("BLAKE3_PUREGO") == ""
HasSSE41 = cpuid.CPU.Has(cpuid.SSE4) &&
os.Getenv("BLAKE3_DISABLE_SSE41") == "" &&
os.Getenv("BLAKE3_PUREGO") == ""
)

View File

@ -0,0 +1,6 @@
//go:build amd64 || 386 || arm || arm64 || mipsle || mips64le || ppc64le || riscv64 || wasm
// +build amd64 386 arm arm64 mipsle mips64le ppc64le riscv64 wasm
package consts
const OptimizeLittleEndian = true

View File

@ -0,0 +1,6 @@
//go:build !amd64 && !386 && !arm && !arm64 && !mipsle && !mips64le && !ppc64le && !riscv64 && !wasm
// +build !amd64,!386,!arm,!arm64,!mipsle,!mips64le,!ppc64le,!riscv64,!wasm
package consts
const OptimizeLittleEndian = false

60
vendor/github.com/zeebo/blake3/internal/utils/utils.go generated vendored Normal file
View File

@ -0,0 +1,60 @@
package utils
import (
"encoding/binary"
"unsafe"
)
func SliceToArray32(bytes []byte) *[32]uint8 { return (*[32]uint8)(unsafe.Pointer(&bytes[0])) }
func SliceToArray64(bytes []byte) *[64]uint8 { return (*[64]uint8)(unsafe.Pointer(&bytes[0])) }
func BytesToWords(bytes *[64]uint8, words *[16]uint32) {
words[0] = binary.LittleEndian.Uint32(bytes[0*4:])
words[1] = binary.LittleEndian.Uint32(bytes[1*4:])
words[2] = binary.LittleEndian.Uint32(bytes[2*4:])
words[3] = binary.LittleEndian.Uint32(bytes[3*4:])
words[4] = binary.LittleEndian.Uint32(bytes[4*4:])
words[5] = binary.LittleEndian.Uint32(bytes[5*4:])
words[6] = binary.LittleEndian.Uint32(bytes[6*4:])
words[7] = binary.LittleEndian.Uint32(bytes[7*4:])
words[8] = binary.LittleEndian.Uint32(bytes[8*4:])
words[9] = binary.LittleEndian.Uint32(bytes[9*4:])
words[10] = binary.LittleEndian.Uint32(bytes[10*4:])
words[11] = binary.LittleEndian.Uint32(bytes[11*4:])
words[12] = binary.LittleEndian.Uint32(bytes[12*4:])
words[13] = binary.LittleEndian.Uint32(bytes[13*4:])
words[14] = binary.LittleEndian.Uint32(bytes[14*4:])
words[15] = binary.LittleEndian.Uint32(bytes[15*4:])
}
func WordsToBytes(words *[16]uint32, bytes []byte) {
bytes = bytes[:64]
binary.LittleEndian.PutUint32(bytes[0*4:1*4], words[0])
binary.LittleEndian.PutUint32(bytes[1*4:2*4], words[1])
binary.LittleEndian.PutUint32(bytes[2*4:3*4], words[2])
binary.LittleEndian.PutUint32(bytes[3*4:4*4], words[3])
binary.LittleEndian.PutUint32(bytes[4*4:5*4], words[4])
binary.LittleEndian.PutUint32(bytes[5*4:6*4], words[5])
binary.LittleEndian.PutUint32(bytes[6*4:7*4], words[6])
binary.LittleEndian.PutUint32(bytes[7*4:8*4], words[7])
binary.LittleEndian.PutUint32(bytes[8*4:9*4], words[8])
binary.LittleEndian.PutUint32(bytes[9*4:10*4], words[9])
binary.LittleEndian.PutUint32(bytes[10*4:11*4], words[10])
binary.LittleEndian.PutUint32(bytes[11*4:12*4], words[11])
binary.LittleEndian.PutUint32(bytes[12*4:13*4], words[12])
binary.LittleEndian.PutUint32(bytes[13*4:14*4], words[13])
binary.LittleEndian.PutUint32(bytes[14*4:15*4], words[14])
binary.LittleEndian.PutUint32(bytes[15*4:16*4], words[15])
}
func KeyFromBytes(key []byte, out *[8]uint32) {
key = key[:32]
out[0] = binary.LittleEndian.Uint32(key[0:])
out[1] = binary.LittleEndian.Uint32(key[4:])
out[2] = binary.LittleEndian.Uint32(key[8:])
out[3] = binary.LittleEndian.Uint32(key[12:])
out[4] = binary.LittleEndian.Uint32(key[16:])
out[5] = binary.LittleEndian.Uint32(key[20:])
out[6] = binary.LittleEndian.Uint32(key[24:])
out[7] = binary.LittleEndian.Uint32(key[28:])
}

39
vendor/modules.txt vendored
View File

@ -1,4 +1,4 @@
# github.com/99designs/gqlgen v0.17.34
# github.com/99designs/gqlgen v0.17.35
## explicit; go 1.18
github.com/99designs/gqlgen
github.com/99designs/gqlgen/api
@ -54,10 +54,10 @@ github.com/beorn7/perks/quantile
# github.com/boltdb/bolt v1.3.1
## explicit
github.com/boltdb/bolt
# github.com/caddyserver/certmagic v0.18.2
# github.com/caddyserver/certmagic v0.19.0
## explicit; go 1.19
github.com/caddyserver/certmagic
# github.com/casbin/casbin/v2 v2.71.1
# github.com/casbin/casbin/v2 v2.72.0
## explicit; go 1.13
github.com/casbin/casbin/v2
github.com/casbin/casbin/v2/config
@ -78,7 +78,7 @@ github.com/cespare/xxhash/v2
# github.com/cpuguy83/go-md2man/v2 v2.0.2
## explicit; go 1.11
github.com/cpuguy83/go-md2man/v2/md2man
# github.com/datarhei/core-client-go/v16 v16.11.1-0.20230710090938-bfcb7f5f7b3e
# github.com/datarhei/core-client-go/v16 v16.11.1-0.20230717141633-8f0e5ce4c68c
## explicit; go 1.18
github.com/datarhei/core-client-go/v16
github.com/datarhei/core-client-go/v16/api
@ -135,8 +135,8 @@ github.com/gabriel-vasile/mimetype/internal/magic
## explicit; go 1.12
github.com/go-ole/go-ole
github.com/go-ole/go-ole/oleutil
# github.com/go-openapi/jsonpointer v0.19.6
## explicit; go 1.13
# github.com/go-openapi/jsonpointer v0.20.0
## explicit; go 1.18
github.com/go-openapi/jsonpointer
# github.com/go-openapi/jsonreference v0.20.2
## explicit; go 1.13
@ -232,7 +232,7 @@ github.com/klauspost/compress/s2
# github.com/klauspost/cpuid/v2 v2.2.5
## explicit; go 1.15
github.com/klauspost/cpuid/v2
# github.com/labstack/echo/v4 v4.10.2
# github.com/labstack/echo/v4 v4.11.1
## explicit; go 1.17
github.com/labstack/echo/v4
github.com/labstack/echo/v4/middleware
@ -282,7 +282,7 @@ github.com/miekg/dns
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
# github.com/minio/minio-go/v7 v7.0.59
# github.com/minio/minio-go/v7 v7.0.60
## explicit; go 1.17
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/credentials
@ -371,6 +371,15 @@ github.com/swaggo/files/v2
# github.com/swaggo/swag v1.16.1
## explicit; go 1.18
github.com/swaggo/swag
# github.com/tidwall/gjson v1.14.4
## explicit; go 1.12
github.com/tidwall/gjson
# github.com/tidwall/match v1.1.1
## explicit; go 1.15
github.com/tidwall/match
# github.com/tidwall/pretty v1.2.1
## explicit; go 1.16
github.com/tidwall/pretty
# github.com/tklauser/go-sysconf v0.3.11
## explicit; go 1.13
github.com/tklauser/go-sysconf
@ -386,7 +395,7 @@ github.com/valyala/bytebufferpool
# github.com/valyala/fasttemplate v1.2.2
## explicit; go 1.12
github.com/valyala/fasttemplate
# github.com/vektah/gqlparser/v2 v2.5.6
# github.com/vektah/gqlparser/v2 v2.5.8
## explicit; go 1.16
github.com/vektah/gqlparser/v2
github.com/vektah/gqlparser/v2/ast
@ -410,6 +419,18 @@ github.com/xrash/smetrics
# github.com/yusufpapurcu/wmi v1.2.3
## explicit; go 1.16
github.com/yusufpapurcu/wmi
# github.com/zeebo/blake3 v0.2.3
## explicit; go 1.13
github.com/zeebo/blake3
github.com/zeebo/blake3/internal/alg
github.com/zeebo/blake3/internal/alg/compress
github.com/zeebo/blake3/internal/alg/compress/compress_pure
github.com/zeebo/blake3/internal/alg/compress/compress_sse41
github.com/zeebo/blake3/internal/alg/hash
github.com/zeebo/blake3/internal/alg/hash/hash_avx2
github.com/zeebo/blake3/internal/alg/hash/hash_pure
github.com/zeebo/blake3/internal/consts
github.com/zeebo/blake3/internal/utils
# go.etcd.io/bbolt v1.3.7
## explicit; go 1.17
go.etcd.io/bbolt