Update dependencies
This commit is contained in:
parent
28d9ae78c7
commit
e35c3dead3
54
go.mod
54
go.mod
@ -1,17 +1,19 @@
|
||||
module github.com/datarhei/core/v16
|
||||
|
||||
go 1.18
|
||||
go 1.21
|
||||
|
||||
toolchain go1.22.1
|
||||
|
||||
require (
|
||||
github.com/99designs/gqlgen v0.17.44
|
||||
github.com/99designs/gqlgen v0.17.45
|
||||
github.com/Masterminds/semver/v3 v3.2.1
|
||||
github.com/atrox/haikunatorgo/v2 v2.0.1
|
||||
github.com/caddyserver/certmagic v0.20.0
|
||||
github.com/datarhei/gosrt v0.5.7
|
||||
github.com/datarhei/gosrt v0.6.0
|
||||
github.com/datarhei/joy4 v0.0.0-20240229100136-43bcaf8ef5e7
|
||||
github.com/go-playground/validator/v10 v10.18.0
|
||||
github.com/go-playground/validator/v10 v10.19.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/golang-jwt/jwt/v5 v5.2.0
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/invopop/jsonschema v0.4.0
|
||||
github.com/joho/godotenv v1.5.1
|
||||
@ -19,18 +21,18 @@ require (
|
||||
github.com/labstack/echo/v4 v4.11.4
|
||||
github.com/lithammer/shortuuid/v4 v4.0.0
|
||||
github.com/mattn/go-isatty v0.0.20
|
||||
github.com/minio/minio-go/v7 v7.0.67
|
||||
github.com/minio/minio-go/v7 v7.0.69
|
||||
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/puzpuzpuz/xsync/v3 v3.1.0
|
||||
github.com/shirou/gopsutil/v3 v3.24.1
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/shirou/gopsutil/v3 v3.24.3
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/swaggo/echo-swagger v1.4.1
|
||||
github.com/swaggo/swag v1.16.3
|
||||
github.com/vektah/gqlparser/v2 v2.5.11
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/mod v0.15.0
|
||||
golang.org/x/mod v0.17.0
|
||||
)
|
||||
|
||||
require (
|
||||
@ -38,17 +40,17 @@ require (
|
||||
github.com/agnivade/levenshtein v1.1.1 // indirect
|
||||
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.20.2 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.4 // indirect
|
||||
github.com/go-openapi/spec v0.20.14 // indirect
|
||||
github.com/go-openapi/swag v0.22.9 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/spec v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
|
||||
@ -58,12 +60,12 @@ require (
|
||||
github.com/iancoleman/orderedmap v0.2.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.7 // indirect
|
||||
github.com/klauspost/compress v1.17.8 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/labstack/gommon v0.4.2 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/libdns/libdns v0.2.1 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect
|
||||
github.com/libdns/libdns v0.2.2 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mholt/acmez v1.2.0 // indirect
|
||||
@ -75,13 +77,12 @@ require (
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/prometheus/client_model v0.6.0 // indirect
|
||||
github.com/prometheus/common v0.48.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.52.3 // indirect
|
||||
github.com/prometheus/procfs v0.13.0 // indirect
|
||||
github.com/rs/xid v1.5.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sosodev/duration v1.2.0 // indirect
|
||||
github.com/swaggo/files/v2 v2.0.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.13 // indirect
|
||||
@ -95,13 +96,14 @@ require (
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
github.com/zeebo/blake3 v0.2.3 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.20.0 // indirect
|
||||
golang.org/x/net v0.21.0 // indirect
|
||||
golang.org/x/sys v0.17.0 // indirect
|
||||
golang.org/x/crypto v0.22.0 // indirect
|
||||
golang.org/x/net v0.24.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/tools v0.18.0 // indirect
|
||||
google.golang.org/protobuf v1.32.0 // indirect
|
||||
golang.org/x/tools v0.20.0 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
||||
120
go.sum
120
go.sum
@ -1,14 +1,17 @@
|
||||
github.com/99designs/gqlgen v0.17.44 h1:OS2wLk/67Y+vXM75XHbwRnNYJcbuJd4OBL76RX3NQQA=
|
||||
github.com/99designs/gqlgen v0.17.44/go.mod h1:UTCu3xpK2mLI5qcMNw+HKDiEL77it/1XtAjisC4sLwM=
|
||||
github.com/99designs/gqlgen v0.17.45 h1:bH0AH67vIJo8JKNKPJP+pOPpQhZeuVRQLf53dKIpDik=
|
||||
github.com/99designs/gqlgen v0.17.45/go.mod h1:Bas0XQ+Jiu/Xm5E33jC8sES3G+iC2esHBMXcq0fUPs0=
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
|
||||
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM=
|
||||
github.com/PuerkitoBio/goquery v1.9.1 h1:mTL6XjbJTZdpfL+Gwl5U2h1l9yEkJjhmlTeV9VPW7UI=
|
||||
github.com/PuerkitoBio/goquery v1.9.1/go.mod h1:cW1n6TmIMDoORQU5IU/P1T3tGFunOeXEpGP2WHRwkbY=
|
||||
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
|
||||
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
|
||||
github.com/andybalholm/cascadia v1.3.1 h1:nhxRkql1kdYCc8Snf7D5/D3spOX+dBgjA6u8x004T2c=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
|
||||
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
|
||||
github.com/atrox/haikunatorgo/v2 v2.0.1 h1:FCVx2KL2YvZtI1rI9WeEHxeLRrKGr0Dd4wfCJiUXupc=
|
||||
@ -19,12 +22,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/caddyserver/certmagic v0.20.0 h1:bTw7LcEZAh9ucYCRXyCpIrSAGplplI0vGYJ4BpCQ/Fc=
|
||||
github.com/caddyserver/certmagic v0.20.0/go.mod h1:N4sXgpICQUskEWpj7zVzvWD41p3NYacrNoZYiRM2jTg=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/datarhei/gosrt v0.5.7 h1:1COeDgF0D0v0poWu0yKDC72d29x16Ma6VFR1icx+3Xc=
|
||||
github.com/datarhei/gosrt v0.5.7/go.mod h1:ZicbsY9T2rXtWgQVBTR9ilnEkSYVSIb36hG9Lj7XCKM=
|
||||
github.com/datarhei/gosrt v0.6.0 h1:HrrXAw90V78ok4WMIhX6se1aTHPCn82Sg2hj+PhdmGc=
|
||||
github.com/datarhei/gosrt v0.6.0/go.mod h1:fsOWdLSHUHShHjgi/46h6wjtdQrtnSdAQFnlas8ONxs=
|
||||
github.com/datarhei/joy4 v0.0.0-20240229100136-43bcaf8ef5e7 h1:MG5XQMTTDPcuvvRzc1c37QbwgDbYPhKmPFo9gSaPdBE=
|
||||
github.com/datarhei/joy4 v0.0.0-20240229100136-43bcaf8ef5e7/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@ -41,29 +44,30 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
|
||||
github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
|
||||
github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
|
||||
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
|
||||
github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do=
|
||||
github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw=
|
||||
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
|
||||
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
||||
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
|
||||
github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
|
||||
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.18.0 h1:BvolUXjp4zuvkZ5YN5t7ebzbhlUtPsPm2S9NAZ5nl9U=
|
||||
github.com/go-playground/validator/v10 v10.18.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4=
|
||||
github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
@ -87,14 +91,16 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
|
||||
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/labstack/echo-jwt v0.0.0-20221127215225-c84d41a71003 h1:FyalHKl9hnJvhNbrABJXXjC2hG7gvIF0ioW9i0xHNQU=
|
||||
github.com/labstack/echo-jwt v0.0.0-20221127215225-c84d41a71003/go.mod h1:ovRFgyKvi73jQIFCWz9ByQwzhIyohkzY0MFAlPGyr8Q=
|
||||
github.com/labstack/echo/v4 v4.11.4 h1:vDZmA+qNeh1pd/cCkEicDMrjtrnMGQ1QFI9gWN1zGq8=
|
||||
@ -103,13 +109,13 @@ github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0
|
||||
github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis=
|
||||
github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40=
|
||||
github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
|
||||
github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
|
||||
github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c=
|
||||
github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI=
|
||||
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
||||
github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74 h1:1KuuSOy4ZNgW0KA2oYIngXVFhQcXxhLqCVK7cBcldkk=
|
||||
github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
@ -123,8 +129,8 @@ github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
|
||||
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8=
|
||||
github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A=
|
||||
github.com/minio/minio-go/v7 v7.0.69 h1:l8AnsQFyY1xiwa/DaQskY4NXSLA2yrGsW5iD9nRPVS0=
|
||||
github.com/minio/minio-go/v7 v7.0.69/go.mod h1:XAvOPJQ5Xlzk5o3o/ArO2NMbhSGkimC+bpW/ngRKDmQ=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
@ -144,40 +150,41 @@ github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 h1:Y7qCvg282QmlyrVQuL
|
||||
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3/go.mod h1:0ZE5gcyWKS151WBDIpmLshHY0l+3edpuKnBUWVVbWKk=
|
||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
||||
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
|
||||
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
|
||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.52.3 h1:5f8uj6ZwHSscOGNdIQg6OiZv/ybiK2CO2q2drVZAQSA=
|
||||
github.com/prometheus/common v0.52.3/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
|
||||
github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
|
||||
github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.1.0 h1:EewKT7/LNac5SLiEblJeUu8z5eERHrmRLnMQL2d7qX4=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.1.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI=
|
||||
github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU=
|
||||
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
|
||||
github.com/shirou/gopsutil/v3 v3.24.3 h1:eoUGJSmdfLzJ3mxIhmOAhgKEKgQkeOwKpz1NbhVnuPE=
|
||||
github.com/shirou/gopsutil/v3 v3.24.3/go.mod h1:JpND7O217xa72ewWz9zN2eIIkPWsDN/3pl0H8Qt0uwg=
|
||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
|
||||
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sosodev/duration v1.2.0 h1:pqK/FLSjsAADWY74SyWDCjOcd5l7H8GSnnOGEB9A1Us=
|
||||
github.com/sosodev/duration v1.2.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/swaggo/echo-swagger v1.4.1 h1:Yf0uPaJWp1uRtDloZALyLnvdBeoEL5Kc7DtnjzO/TUk=
|
||||
github.com/swaggo/echo-swagger v1.4.1/go.mod h1:C8bSi+9yH2FLZsnhqMZLIZddpUxZdBYuNHbtaS1Hljc=
|
||||
github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw=
|
||||
@ -207,7 +214,6 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
|
||||
@ -217,40 +223,42 @@ github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvv
|
||||
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
|
||||
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
|
||||
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
|
||||
golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
|
||||
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
|
||||
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
|
||||
2
vendor/github.com/99designs/gqlgen/README.md
generated
vendored
2
vendor/github.com/99designs/gqlgen/README.md
generated
vendored
@ -86,7 +86,7 @@ models:
|
||||
|
||||
- #### Using Explicit Resolvers
|
||||
|
||||
If you want to Keep using the generated model, mark the field as requiring a resolver explicitly in `gqlgen.yml` like this:
|
||||
If you want to keep using the generated model, mark the field as requiring a resolver explicitly in `gqlgen.yml` like this:
|
||||
|
||||
```yaml
|
||||
# gqlgen.yml
|
||||
|
||||
1
vendor/github.com/99designs/gqlgen/codegen/config/config.go
generated
vendored
1
vendor/github.com/99designs/gqlgen/codegen/config/config.go
generated
vendored
@ -39,6 +39,7 @@ type Config struct {
|
||||
OmitGQLGenFileNotice bool `yaml:"omit_gqlgen_file_notice,omitempty"`
|
||||
OmitGQLGenVersionInFileNotice bool `yaml:"omit_gqlgen_version_in_file_notice,omitempty"`
|
||||
OmitRootModels bool `yaml:"omit_root_models,omitempty"`
|
||||
OmitResolverFields bool `yaml:"omit_resolver_fields,omitempty"`
|
||||
StructFieldsAlwaysPointers bool `yaml:"struct_fields_always_pointers,omitempty"`
|
||||
ReturnPointersInUmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"`
|
||||
ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"`
|
||||
|
||||
1
vendor/github.com/99designs/gqlgen/codegen/config/package.go
generated
vendored
1
vendor/github.com/99designs/gqlgen/codegen/config/package.go
generated
vendored
@ -14,6 +14,7 @@ type PackageConfig struct {
|
||||
Package string `yaml:"package,omitempty"`
|
||||
Version int `yaml:"version,omitempty"`
|
||||
ModelTemplate string `yaml:"model_template,omitempty"`
|
||||
Options map[string]bool `yaml:"options,omitempty"`
|
||||
}
|
||||
|
||||
func (c *PackageConfig) ImportPath() string {
|
||||
|
||||
2
vendor/github.com/99designs/gqlgen/graphql/version.go
generated
vendored
2
vendor/github.com/99designs/gqlgen/graphql/version.go
generated
vendored
@ -1,3 +1,3 @@
|
||||
package graphql
|
||||
|
||||
const Version = "v0.17.44"
|
||||
const Version = "v0.17.45"
|
||||
|
||||
6
vendor/github.com/99designs/gqlgen/plugin/federation/entity.go
generated
vendored
6
vendor/github.com/99designs/gqlgen/plugin/federation/entity.go
generated
vendored
@ -18,6 +18,7 @@ type Entity struct {
|
||||
Resolvers []*EntityResolver
|
||||
Requires []*Requires
|
||||
Multi bool
|
||||
Type types.Type
|
||||
}
|
||||
|
||||
type EntityResolver struct {
|
||||
@ -116,3 +117,8 @@ func (e *Entity) keyFields() []string {
|
||||
}
|
||||
return keyFields
|
||||
}
|
||||
|
||||
// GetTypeInfo - get the imported package & type name combo. package.TypeName
|
||||
func (e Entity) GetTypeInfo() string {
|
||||
return templates.CurrentImports.LookupType(e.Type)
|
||||
}
|
||||
|
||||
99
vendor/github.com/99designs/gqlgen/plugin/federation/federation.go
generated
vendored
99
vendor/github.com/99designs/gqlgen/plugin/federation/federation.go
generated
vendored
@ -11,6 +11,7 @@ import (
|
||||
"github.com/99designs/gqlgen/codegen"
|
||||
"github.com/99designs/gqlgen/codegen/config"
|
||||
"github.com/99designs/gqlgen/codegen/templates"
|
||||
"github.com/99designs/gqlgen/internal/rewrite"
|
||||
"github.com/99designs/gqlgen/plugin"
|
||||
"github.com/99designs/gqlgen/plugin/federation/fieldset"
|
||||
)
|
||||
@ -18,9 +19,13 @@ import (
|
||||
//go:embed federation.gotpl
|
||||
var federationTemplate string
|
||||
|
||||
//go:embed requires.gotpl
|
||||
var explicitRequiresTemplate string
|
||||
|
||||
type federation struct {
|
||||
Entities []*Entity
|
||||
Version int
|
||||
PackageOptions map[string]bool
|
||||
}
|
||||
|
||||
// New returns a federation plugin that injects
|
||||
@ -263,6 +268,16 @@ type Entity {
|
||||
}
|
||||
|
||||
func (f *federation) GenerateCode(data *codegen.Data) error {
|
||||
// requires imports
|
||||
requiresImports := make(map[string]bool, 0)
|
||||
requiresImports["context"] = true
|
||||
requiresImports["fmt"] = true
|
||||
|
||||
requiresEntities := make(map[string]*Entity, 0)
|
||||
|
||||
// Save package options on f for template use
|
||||
f.PackageOptions = data.Config.Federation.Options
|
||||
|
||||
if len(f.Entities) > 0 {
|
||||
if data.Objects.ByName("Entity") != nil {
|
||||
data.Objects.ByName("Entity").Root = true
|
||||
@ -302,9 +317,19 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
|
||||
fmt.Println("skipping @requires field " + reqField.Name + " in " + e.Def.Name)
|
||||
continue
|
||||
}
|
||||
// keep track of which entities have requires
|
||||
requiresEntities[e.Def.Name] = e
|
||||
// make a proper import path
|
||||
typeString := strings.Split(obj.Type.String(), ".")
|
||||
requiresImports[strings.Join(typeString[:len(typeString)-1], ".")] = true
|
||||
|
||||
cgField := reqField.Field.TypeReference(obj, data.Objects)
|
||||
reqField.Type = cgField.TypeReference
|
||||
}
|
||||
|
||||
// add type info to entity
|
||||
e.Type = obj.Type
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -325,10 +350,82 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
|
||||
}
|
||||
}
|
||||
|
||||
if data.Config.Federation.Options["explicit_requires"] && len(requiresEntities) > 0 {
|
||||
// check for existing requires functions
|
||||
type Populator struct {
|
||||
FuncName string
|
||||
Exists bool
|
||||
Comment string
|
||||
Implementation string
|
||||
Entity *Entity
|
||||
}
|
||||
populators := make([]Populator, 0)
|
||||
|
||||
rewriter, err := rewrite.New(data.Config.Federation.Dir())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for name, entity := range requiresEntities {
|
||||
populator := Populator{
|
||||
FuncName: fmt.Sprintf("Populate%sRequires", name),
|
||||
Entity: entity,
|
||||
}
|
||||
|
||||
populator.Comment = strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment("executionContext", populator.FuncName), `\`))
|
||||
populator.Implementation = strings.TrimSpace(rewriter.GetMethodBody("executionContext", populator.FuncName))
|
||||
|
||||
if populator.Implementation == "" {
|
||||
populator.Exists = false
|
||||
populator.Implementation = fmt.Sprintf("panic(fmt.Errorf(\"not implemented: %v\"))", populator.FuncName)
|
||||
}
|
||||
populators = append(populators, populator)
|
||||
}
|
||||
|
||||
sort.Slice(populators, func(i, j int) bool {
|
||||
return populators[i].FuncName < populators[j].FuncName
|
||||
})
|
||||
|
||||
requiresFile := data.Config.Federation.Dir() + "/federation.requires.go"
|
||||
existingImports := rewriter.ExistingImports(requiresFile)
|
||||
for _, imp := range existingImports {
|
||||
if imp.Alias == "" {
|
||||
// import exists in both places, remove
|
||||
delete(requiresImports, imp.ImportPath)
|
||||
}
|
||||
}
|
||||
|
||||
for k := range requiresImports {
|
||||
existingImports = append(existingImports, rewrite.Import{ImportPath: k})
|
||||
}
|
||||
|
||||
// render requires populators
|
||||
err = templates.Render(templates.Options{
|
||||
PackageName: data.Config.Federation.Package,
|
||||
Filename: requiresFile,
|
||||
Data: struct {
|
||||
federation
|
||||
ExistingImports []rewrite.Import
|
||||
Populators []Populator
|
||||
OriginalSource string
|
||||
}{*f, existingImports, populators, ""},
|
||||
GeneratedHeader: false,
|
||||
Packages: data.Config.Packages,
|
||||
Template: explicitRequiresTemplate,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return templates.Render(templates.Options{
|
||||
PackageName: data.Config.Federation.Package,
|
||||
Filename: data.Config.Federation.Filename,
|
||||
Data: f,
|
||||
Data: struct {
|
||||
federation
|
||||
UsePointers bool
|
||||
}{*f, data.Config.ResolversAlwaysReturnPointers},
|
||||
GeneratedHeader: true,
|
||||
Packages: data.Config.Packages,
|
||||
Template: federationTemplate,
|
||||
|
||||
9
vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl
generated
vendored
9
vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl
generated
vendored
@ -5,6 +5,8 @@
|
||||
{{ reserveImport "sync" }}
|
||||
|
||||
{{ reserveImport "github.com/99designs/gqlgen/plugin/federation/fedruntime" }}
|
||||
{{ $options := .PackageOptions }}
|
||||
{{ $usePointers := .UsePointers }}
|
||||
|
||||
var (
|
||||
ErrUnknownType = errors.New("unknown type")
|
||||
@ -103,12 +105,19 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
|
||||
if err != nil {
|
||||
return fmt.Errorf(`resolving Entity "{{$entity.Def.Name}}": %w`, err)
|
||||
}
|
||||
{{ if and (index $options "explicit_requires") $entity.Requires }}
|
||||
err = ec.Populate{{$entity.Def.Name}}Requires(ctx, {{- if (not $usePointers) -}}&{{- end -}}entity, rep)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`populating requires for Entity "{{$entity.Def.Name}}": %w`, err)
|
||||
}
|
||||
{{- else }}
|
||||
{{ range $entity.Requires }}
|
||||
entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
list[idx[i]] = entity
|
||||
return nil
|
||||
{{- end }}
|
||||
|
||||
10
vendor/github.com/99designs/gqlgen/plugin/federation/fieldset/fieldset.go
generated
vendored
10
vendor/github.com/99designs/gqlgen/plugin/federation/fieldset/fieldset.go
generated
vendored
@ -100,6 +100,7 @@ func (f Field) ToGoPrivate() string {
|
||||
|
||||
for i, field := range f {
|
||||
if i == 0 {
|
||||
field = trimArgumentFromFieldName(field)
|
||||
ret += templates.ToGoPrivate(field)
|
||||
continue
|
||||
}
|
||||
@ -148,7 +149,7 @@ func extractSubs(str string) (string, string, string) {
|
||||
if start < 0 || end < 0 {
|
||||
panic("invalid key fieldSet: " + str)
|
||||
}
|
||||
return strings.TrimSpace(str[:start]), strings.TrimSpace(str[start+1 : end]), strings.TrimSpace(str[end+1:])
|
||||
return trimArgumentFromFieldName(strings.TrimSpace(str[:start])), strings.TrimSpace(str[start+1 : end]), strings.TrimSpace(str[end+1:])
|
||||
}
|
||||
|
||||
// matchingBracketIndex returns the index of the closing bracket, assuming an open bracket at start.
|
||||
@ -174,9 +175,16 @@ func matchingBracketIndex(str string, start int) int {
|
||||
|
||||
func fieldByName(obj *codegen.Object, name string) *codegen.Field {
|
||||
for _, field := range obj.Fields {
|
||||
field.Name = trimArgumentFromFieldName(field.Name)
|
||||
if field.Name == name {
|
||||
return field
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// trimArgumentFromFieldName removes any arguments from the field name.
|
||||
// It removes any suffixes from the raw string, starting from the argument-open character `(`
|
||||
func trimArgumentFromFieldName(raw string) string {
|
||||
return strings.Split(raw, "(")[0]
|
||||
}
|
||||
|
||||
20
vendor/github.com/99designs/gqlgen/plugin/federation/requires.gotpl
generated
vendored
Normal file
20
vendor/github.com/99designs/gqlgen/plugin/federation/requires.gotpl
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
{{ range .ExistingImports }}
|
||||
{{ if ne .Alias "" }}
|
||||
{{ reserveImport .ImportPath .Alias }}
|
||||
{{ else }}
|
||||
{{ reserveImport .ImportPath }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
{{ range .Populators -}}
|
||||
{{ if .Comment -}}
|
||||
// {{.Comment}}
|
||||
{{- else -}}
|
||||
// {{.FuncName}} is the requires populator for the {{.Entity.Def.Name}} entity.
|
||||
{{- end }}
|
||||
func (ec *executionContext) {{.FuncName}}(ctx context.Context, entity *{{.Entity.GetTypeInfo}}, reps map[string]interface{}) error {
|
||||
{{.Implementation}}
|
||||
}
|
||||
{{ end }}
|
||||
|
||||
{{ .OriginalSource }}
|
||||
11
vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go
generated
vendored
11
vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go
generated
vendored
@ -71,6 +71,7 @@ type Field struct {
|
||||
GoName string
|
||||
Type types.Type
|
||||
Tag string
|
||||
IsResolver bool
|
||||
Omittable bool
|
||||
}
|
||||
|
||||
@ -419,6 +420,10 @@ func (m *Plugin) generateFields(cfg *config.Config, schemaType *ast.Definition)
|
||||
f = mf
|
||||
}
|
||||
|
||||
if f.IsResolver && cfg.OmitResolverFields {
|
||||
continue
|
||||
}
|
||||
|
||||
if f.Omittable {
|
||||
if schemaType.Kind != ast.InputObject || field.Type.NonNull {
|
||||
return nil, fmt.Errorf("generror: field %v.%v: omittable is only applicable to nullable input fields", schemaType.Name, field.Name)
|
||||
@ -613,6 +618,12 @@ func GoFieldHook(td *ast.Definition, fd *ast.FieldDefinition, f *Field) (*Field,
|
||||
}
|
||||
}
|
||||
|
||||
if arg := goField.Arguments.ForName("forceResolver"); arg != nil {
|
||||
if k, err := arg.Value.Value(nil); err == nil {
|
||||
f.IsResolver = k.(bool)
|
||||
}
|
||||
}
|
||||
|
||||
if arg := goField.Arguments.ForName("omittable"); arg != nil {
|
||||
if k, err := arg.Value.Value(nil); err == nil {
|
||||
f.Omittable = k.(bool)
|
||||
|
||||
2
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
2
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
- [FreeCache](https://github.com/coocood/freecache)
|
||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
||||
- [Ristretto](https://github.com/dgraph-io/ristretto)
|
||||
- [Badger](https://github.com/dgraph-io/badger)
|
||||
|
||||
29
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
29
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
@ -19,10 +19,13 @@ const (
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array of the assembly code.
|
||||
// contiguous array for the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
//
|
||||
// Note that a zero-valued Digest is not ready to receive writes.
|
||||
// Call Reset or create a Digest using New before calling other methods.
|
||||
type Digest struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
@ -33,19 +36,31 @@ type Digest struct {
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
||||
// New creates a new Digest with a zero seed.
|
||||
func New() *Digest {
|
||||
return NewWithSeed(0)
|
||||
}
|
||||
|
||||
// NewWithSeed creates a new Digest with the given seed.
|
||||
func NewWithSeed(seed uint64) *Digest {
|
||||
var d Digest
|
||||
d.Reset()
|
||||
d.ResetWithSeed(seed)
|
||||
return &d
|
||||
}
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
// It uses a seed value of zero.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = primes[0] + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -primes[0]
|
||||
d.ResetWithSeed(0)
|
||||
}
|
||||
|
||||
// ResetWithSeed clears the Digest's state so that it can be reused.
|
||||
// It uses the given seed to initialize the state.
|
||||
func (d *Digest) ResetWithSeed(seed uint64) {
|
||||
d.v1 = seed + prime1 + prime2
|
||||
d.v2 = seed + prime2
|
||||
d.v3 = seed
|
||||
d.v4 = seed - prime1
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
2
vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
generated
vendored
2
vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
generated
vendored
@ -6,7 +6,7 @@
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
|
||||
//
|
||||
//go:noescape
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
2
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
2
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
@ -3,7 +3,7 @@
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
|
||||
func Sum64(b []byte) uint64 {
|
||||
// A simpler version would be
|
||||
// d := New()
|
||||
|
||||
2
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
2
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
@ -5,7 +5,7 @@
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
|
||||
func Sum64String(s string) uint64 {
|
||||
return Sum64([]byte(s))
|
||||
}
|
||||
|
||||
2
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
2
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
@ -33,7 +33,7 @@ import (
|
||||
//
|
||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
|
||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||
func Sum64String(s string) uint64 {
|
||||
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
||||
|
||||
2
vendor/github.com/datarhei/gosrt/Makefile
generated
vendored
2
vendor/github.com/datarhei/gosrt/Makefile
generated
vendored
@ -9,7 +9,7 @@ test:
|
||||
|
||||
## fuzz: Run fuzz tests
|
||||
fuzz:
|
||||
go test -fuzz=Fuzz -run=^Fuzz ./internal/packet -fuzztime 30s
|
||||
go test -fuzz=Fuzz -run=^Fuzz ./packet -fuzztime 30s
|
||||
|
||||
## vet: Analyze code for potential errors
|
||||
vet:
|
||||
|
||||
124
vendor/github.com/datarhei/gosrt/congestion/congestion.go
generated
vendored
Normal file
124
vendor/github.com/datarhei/gosrt/congestion/congestion.go
generated
vendored
Normal file
@ -0,0 +1,124 @@
|
||||
// Package congestions provides interfaces and types congestion control implementations for SRT
|
||||
package congestion
|
||||
|
||||
import (
|
||||
"github.com/datarhei/gosrt/circular"
|
||||
"github.com/datarhei/gosrt/packet"
|
||||
)
|
||||
|
||||
// Sender is the sending part of the congestion control
|
||||
type Sender interface {
|
||||
// Stats returns sender statistics.
|
||||
Stats() SendStats
|
||||
|
||||
// Flush flushes all queued packages.
|
||||
Flush()
|
||||
|
||||
// Push pushes a packet to be send on the sender queue.
|
||||
Push(p packet.Packet)
|
||||
|
||||
// Tick gets called from a connection in order to proceed with the queued packets. The provided value for
|
||||
// now is corresponds to the timestamps in the queued packets. Those timestamps are the microseconds
|
||||
// since the start of the connection.
|
||||
Tick(now uint64)
|
||||
|
||||
// ACK gets called when a sequence number has been confirmed from a receiver.
|
||||
ACK(sequenceNumber circular.Number)
|
||||
|
||||
// NAK get called when packets with the listed sequence number should be resend.
|
||||
NAK(sequenceNumbers []circular.Number)
|
||||
|
||||
// SetDropThreshold sets the threshold in microseconds for when to drop too late packages from the queue.
|
||||
SetDropThreshold(threshold uint64)
|
||||
}
|
||||
|
||||
// Receiver is the receiving part of the congestion control
|
||||
type Receiver interface {
|
||||
// Stats returns receiver statistics.
|
||||
Stats() ReceiveStats
|
||||
|
||||
// PacketRate returns the current packets and bytes per second, and the capacity of the link.
|
||||
PacketRate() (pps, bps, capacity float64)
|
||||
|
||||
// Flush flushes all queued packages.
|
||||
Flush()
|
||||
|
||||
// Push pushed a recieved packet to the receiver queue.
|
||||
Push(pkt packet.Packet)
|
||||
|
||||
// Tick gets called from a connection in order to proceed with queued packets. The provided value for
|
||||
// now is corresponds to the timestamps in the queued packets. Those timestamps are the microseconds
|
||||
// since the start of the connection.
|
||||
Tick(now uint64)
|
||||
|
||||
// SetNAKInterval sets the interval between two periodic NAK messages to the sender in microseconds.
|
||||
SetNAKInterval(nakInterval uint64)
|
||||
}
|
||||
|
||||
// SendStats are collected statistics from a sender
|
||||
type SendStats struct {
|
||||
Pkt uint64 // Sent packets in total
|
||||
Byte uint64 // Sent bytes in total
|
||||
|
||||
PktUnique uint64
|
||||
ByteUnique uint64
|
||||
|
||||
PktLoss uint64
|
||||
ByteLoss uint64
|
||||
|
||||
PktRetrans uint64
|
||||
ByteRetrans uint64
|
||||
|
||||
UsSndDuration uint64 // microseconds
|
||||
|
||||
PktDrop uint64
|
||||
ByteDrop uint64
|
||||
|
||||
// instantaneous
|
||||
PktBuf uint64
|
||||
ByteBuf uint64
|
||||
MsBuf uint64
|
||||
|
||||
PktFlightSize uint64
|
||||
|
||||
UsPktSndPeriod float64 // microseconds
|
||||
BytePayload uint64
|
||||
|
||||
MbpsEstimatedInputBandwidth float64
|
||||
MbpsEstimatedSentBandwidth float64
|
||||
|
||||
PktLossRate float64
|
||||
}
|
||||
|
||||
// ReceiveStats are collected statistics from a reciever
|
||||
type ReceiveStats struct {
|
||||
Pkt uint64
|
||||
Byte uint64
|
||||
|
||||
PktUnique uint64
|
||||
ByteUnique uint64
|
||||
|
||||
PktLoss uint64
|
||||
ByteLoss uint64
|
||||
|
||||
PktRetrans uint64
|
||||
ByteRetrans uint64
|
||||
|
||||
PktBelated uint64
|
||||
ByteBelated uint64
|
||||
|
||||
PktDrop uint64
|
||||
ByteDrop uint64
|
||||
|
||||
// instantaneous
|
||||
PktBuf uint64
|
||||
ByteBuf uint64
|
||||
MsBuf uint64
|
||||
|
||||
BytePayload uint64
|
||||
|
||||
MbpsEstimatedRecvBandwidth float64
|
||||
MbpsEstimatedLinkCapacity float64
|
||||
|
||||
PktLossRate float64
|
||||
}
|
||||
2
vendor/github.com/datarhei/gosrt/congestion/live/doc.go
generated
vendored
Normal file
2
vendor/github.com/datarhei/gosrt/congestion/live/doc.go
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
// Package live provides implementations of the Sender and Receiver interfaces for live congestion control
|
||||
package live
|
||||
182
vendor/github.com/datarhei/gosrt/congestion/live/fake.go
generated
vendored
Normal file
182
vendor/github.com/datarhei/gosrt/congestion/live/fake.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
package live
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/gosrt/circular"
|
||||
"github.com/datarhei/gosrt/congestion"
|
||||
"github.com/datarhei/gosrt/packet"
|
||||
)
|
||||
|
||||
type fakeLiveReceive struct {
|
||||
maxSeenSequenceNumber circular.Number
|
||||
lastACKSequenceNumber circular.Number
|
||||
lastDeliveredSequenceNumber circular.Number
|
||||
|
||||
nPackets uint
|
||||
|
||||
periodicACKInterval uint64 // config
|
||||
periodicNAKInterval uint64 // config
|
||||
|
||||
lastPeriodicACK uint64
|
||||
|
||||
avgPayloadSize float64 // bytes
|
||||
|
||||
rate struct {
|
||||
last time.Time
|
||||
period time.Duration
|
||||
|
||||
packets uint64
|
||||
bytes uint64
|
||||
|
||||
pps float64
|
||||
bps float64
|
||||
}
|
||||
|
||||
sendACK func(seq circular.Number, light bool)
|
||||
sendNAK func(from, to circular.Number)
|
||||
deliver func(p packet.Packet)
|
||||
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
func NewFakeLiveReceive(config ReceiveConfig) congestion.Receiver {
|
||||
r := &fakeLiveReceive{
|
||||
maxSeenSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
lastACKSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
lastDeliveredSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
|
||||
periodicACKInterval: config.PeriodicACKInterval,
|
||||
periodicNAKInterval: config.PeriodicNAKInterval,
|
||||
|
||||
avgPayloadSize: 1456, // 5.1.2. SRT's Default LiveCC Algorithm
|
||||
|
||||
sendACK: config.OnSendACK,
|
||||
sendNAK: config.OnSendNAK,
|
||||
deliver: config.OnDeliver,
|
||||
}
|
||||
|
||||
if r.sendACK == nil {
|
||||
r.sendACK = func(seq circular.Number, light bool) {}
|
||||
}
|
||||
|
||||
if r.sendNAK == nil {
|
||||
r.sendNAK = func(from, to circular.Number) {}
|
||||
}
|
||||
|
||||
if r.deliver == nil {
|
||||
r.deliver = func(p packet.Packet) {}
|
||||
}
|
||||
|
||||
r.rate.last = time.Now()
|
||||
r.rate.period = time.Second
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *fakeLiveReceive) Stats() congestion.ReceiveStats { return congestion.ReceiveStats{} }
|
||||
func (r *fakeLiveReceive) PacketRate() (pps, bps, capacity float64) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
tdiff := time.Since(r.rate.last)
|
||||
|
||||
if tdiff < r.rate.period {
|
||||
pps = r.rate.pps
|
||||
bps = r.rate.bps
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
r.rate.pps = float64(r.rate.packets) / tdiff.Seconds()
|
||||
r.rate.bps = float64(r.rate.bytes) / tdiff.Seconds()
|
||||
|
||||
r.rate.packets, r.rate.bytes = 0, 0
|
||||
r.rate.last = time.Now()
|
||||
|
||||
pps = r.rate.pps
|
||||
bps = r.rate.bps
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *fakeLiveReceive) Flush() {}
|
||||
|
||||
func (r *fakeLiveReceive) Push(pkt packet.Packet) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
if pkt == nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.nPackets++
|
||||
|
||||
pktLen := pkt.Len()
|
||||
|
||||
r.rate.packets++
|
||||
r.rate.bytes += pktLen
|
||||
|
||||
// 5.1.2. SRT's Default LiveCC Algorithm
|
||||
r.avgPayloadSize = 0.875*r.avgPayloadSize + 0.125*float64(pktLen)
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lte(r.lastDeliveredSequenceNumber) {
|
||||
// Too old, because up until r.lastDeliveredSequenceNumber, we already delivered
|
||||
return
|
||||
}
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lt(r.lastACKSequenceNumber) {
|
||||
// Already acknowledged, ignoring
|
||||
return
|
||||
}
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lte(r.maxSeenSequenceNumber) {
|
||||
return
|
||||
}
|
||||
|
||||
r.maxSeenSequenceNumber = pkt.Header().PacketSequenceNumber
|
||||
}
|
||||
|
||||
func (r *fakeLiveReceive) periodicACK(now uint64) (ok bool, sequenceNumber circular.Number, lite bool) {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
|
||||
// 4.8.1. Packet Acknowledgement (ACKs, ACKACKs)
|
||||
if now-r.lastPeriodicACK < r.periodicACKInterval {
|
||||
if r.nPackets >= 64 {
|
||||
lite = true // Send light ACK
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ok = true
|
||||
sequenceNumber = r.maxSeenSequenceNumber.Inc()
|
||||
|
||||
r.lastACKSequenceNumber = r.maxSeenSequenceNumber
|
||||
|
||||
r.lastPeriodicACK = now
|
||||
r.nPackets = 0
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *fakeLiveReceive) Tick(now uint64) {
|
||||
if ok, sequenceNumber, lite := r.periodicACK(now); ok {
|
||||
r.sendACK(sequenceNumber, lite)
|
||||
}
|
||||
|
||||
// Deliver packets whose PktTsbpdTime is ripe
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.lastDeliveredSequenceNumber = r.lastACKSequenceNumber
|
||||
}
|
||||
|
||||
func (r *fakeLiveReceive) SetNAKInterval(nakInterval uint64) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.periodicNAKInterval = nakInterval
|
||||
}
|
||||
441
vendor/github.com/datarhei/gosrt/congestion/live/receive.go
generated
vendored
Normal file
441
vendor/github.com/datarhei/gosrt/congestion/live/receive.go
generated
vendored
Normal file
@ -0,0 +1,441 @@
|
||||
package live
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/gosrt/circular"
|
||||
"github.com/datarhei/gosrt/congestion"
|
||||
"github.com/datarhei/gosrt/packet"
|
||||
)
|
||||
|
||||
// ReceiveConfig is the configuration for the liveRecv congestion control
|
||||
type ReceiveConfig struct {
|
||||
InitialSequenceNumber circular.Number
|
||||
PeriodicACKInterval uint64 // microseconds
|
||||
PeriodicNAKInterval uint64 // microseconds
|
||||
OnSendACK func(seq circular.Number, light bool)
|
||||
OnSendNAK func(from, to circular.Number)
|
||||
OnDeliver func(p packet.Packet)
|
||||
}
|
||||
|
||||
// receiver implements the Receiver interface
|
||||
type receiver struct {
|
||||
maxSeenSequenceNumber circular.Number
|
||||
lastACKSequenceNumber circular.Number
|
||||
lastDeliveredSequenceNumber circular.Number
|
||||
packetList *list.List
|
||||
lock sync.RWMutex
|
||||
|
||||
nPackets uint
|
||||
|
||||
periodicACKInterval uint64 // config
|
||||
periodicNAKInterval uint64 // config
|
||||
|
||||
lastPeriodicACK uint64
|
||||
lastPeriodicNAK uint64
|
||||
|
||||
avgPayloadSize float64 // bytes
|
||||
avgLinkCapacity float64 // packets per second
|
||||
|
||||
probeTime time.Time
|
||||
probeNextSeq circular.Number
|
||||
|
||||
statistics congestion.ReceiveStats
|
||||
|
||||
rate struct {
|
||||
last uint64 // microseconds
|
||||
period uint64
|
||||
|
||||
packets uint64
|
||||
bytes uint64
|
||||
bytesRetrans uint64
|
||||
|
||||
packetsPerSecond float64
|
||||
bytesPerSecond float64
|
||||
|
||||
pktLossRate float64
|
||||
}
|
||||
|
||||
sendACK func(seq circular.Number, light bool)
|
||||
sendNAK func(from, to circular.Number)
|
||||
deliver func(p packet.Packet)
|
||||
}
|
||||
|
||||
// NewReceiver takes a ReceiveConfig and returns a new Receiver
|
||||
func NewReceiver(config ReceiveConfig) congestion.Receiver {
|
||||
r := &receiver{
|
||||
maxSeenSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
lastACKSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
lastDeliveredSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
packetList: list.New(),
|
||||
|
||||
periodicACKInterval: config.PeriodicACKInterval,
|
||||
periodicNAKInterval: config.PeriodicNAKInterval,
|
||||
|
||||
avgPayloadSize: 1456, // 5.1.2. SRT's Default LiveCC Algorithm
|
||||
|
||||
sendACK: config.OnSendACK,
|
||||
sendNAK: config.OnSendNAK,
|
||||
deliver: config.OnDeliver,
|
||||
}
|
||||
|
||||
if r.sendACK == nil {
|
||||
r.sendACK = func(seq circular.Number, light bool) {}
|
||||
}
|
||||
|
||||
if r.sendNAK == nil {
|
||||
r.sendNAK = func(from, to circular.Number) {}
|
||||
}
|
||||
|
||||
if r.deliver == nil {
|
||||
r.deliver = func(p packet.Packet) {}
|
||||
}
|
||||
|
||||
r.rate.last = 0
|
||||
r.rate.period = uint64(time.Second.Microseconds())
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *receiver) Stats() congestion.ReceiveStats {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.statistics.BytePayload = uint64(r.avgPayloadSize)
|
||||
r.statistics.MbpsEstimatedRecvBandwidth = r.rate.bytesPerSecond * 8 / 1024 / 1024
|
||||
r.statistics.MbpsEstimatedLinkCapacity = r.avgLinkCapacity * packet.MAX_PAYLOAD_SIZE * 8 / 1024 / 1024
|
||||
r.statistics.PktLossRate = r.rate.pktLossRate
|
||||
|
||||
return r.statistics
|
||||
}
|
||||
|
||||
func (r *receiver) PacketRate() (pps, bps, capacity float64) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
pps = r.rate.packetsPerSecond
|
||||
bps = r.rate.bytesPerSecond
|
||||
capacity = r.avgLinkCapacity
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *receiver) Flush() {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.packetList = r.packetList.Init()
|
||||
}
|
||||
|
||||
func (r *receiver) Push(pkt packet.Packet) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
if pkt == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// This is not really well (not at all) described in the specs. See core.cpp and window.h
|
||||
// and search for PUMASK_SEQNO_PROBE (0xF). Every 16th and 17th packet are
|
||||
// sent in pairs. This is used as a probe for the theoretical capacity of the link.
|
||||
if !pkt.Header().RetransmittedPacketFlag {
|
||||
probe := pkt.Header().PacketSequenceNumber.Val() & 0xF
|
||||
if probe == 0 {
|
||||
r.probeTime = time.Now()
|
||||
r.probeNextSeq = pkt.Header().PacketSequenceNumber.Inc()
|
||||
} else if probe == 1 && pkt.Header().PacketSequenceNumber.Equals(r.probeNextSeq) && !r.probeTime.IsZero() && pkt.Len() != 0 {
|
||||
// The time between packets scaled to a fully loaded packet
|
||||
diff := float64(time.Since(r.probeTime).Microseconds()) * (packet.MAX_PAYLOAD_SIZE / float64(pkt.Len()))
|
||||
if diff != 0 {
|
||||
// Here we're doing an average of the measurements.
|
||||
r.avgLinkCapacity = 0.875*r.avgLinkCapacity + 0.125*1_000_000/diff
|
||||
}
|
||||
} else {
|
||||
r.probeTime = time.Time{}
|
||||
}
|
||||
} else {
|
||||
r.probeTime = time.Time{}
|
||||
}
|
||||
|
||||
r.nPackets++
|
||||
|
||||
pktLen := pkt.Len()
|
||||
|
||||
r.rate.packets++
|
||||
r.rate.bytes += pktLen
|
||||
|
||||
r.statistics.Pkt++
|
||||
r.statistics.Byte += pktLen
|
||||
|
||||
//pkt.PktTsbpdTime = pkt.Timestamp + r.delay
|
||||
if pkt.Header().RetransmittedPacketFlag {
|
||||
r.statistics.PktRetrans++
|
||||
r.statistics.ByteRetrans += pktLen
|
||||
|
||||
r.rate.bytesRetrans += pktLen
|
||||
}
|
||||
|
||||
// 5.1.2. SRT's Default LiveCC Algorithm
|
||||
r.avgPayloadSize = 0.875*r.avgPayloadSize + 0.125*float64(pktLen)
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lte(r.lastDeliveredSequenceNumber) {
|
||||
// Too old, because up until r.lastDeliveredSequenceNumber, we already delivered
|
||||
r.statistics.PktBelated++
|
||||
r.statistics.ByteBelated += pktLen
|
||||
|
||||
r.statistics.PktDrop++
|
||||
r.statistics.ByteDrop += pktLen
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lt(r.lastACKSequenceNumber) {
|
||||
// Already acknowledged, ignoring
|
||||
r.statistics.PktDrop++
|
||||
r.statistics.ByteDrop += pktLen
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Equals(r.maxSeenSequenceNumber.Inc()) {
|
||||
// In order, the packet we expected
|
||||
r.maxSeenSequenceNumber = pkt.Header().PacketSequenceNumber
|
||||
} else if pkt.Header().PacketSequenceNumber.Lte(r.maxSeenSequenceNumber) {
|
||||
// Out of order, is it a missing piece? put it in the correct position
|
||||
for e := r.packetList.Front(); e != nil; e = e.Next() {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
if p.Header().PacketSequenceNumber == pkt.Header().PacketSequenceNumber {
|
||||
// Already received (has been sent more than once), ignoring
|
||||
r.statistics.PktDrop++
|
||||
r.statistics.ByteDrop += pktLen
|
||||
|
||||
break
|
||||
} else if p.Header().PacketSequenceNumber.Gt(pkt.Header().PacketSequenceNumber) {
|
||||
// Late arrival, this fills a gap
|
||||
r.statistics.PktBuf++
|
||||
r.statistics.PktUnique++
|
||||
|
||||
r.statistics.ByteBuf += pktLen
|
||||
r.statistics.ByteUnique += pktLen
|
||||
|
||||
r.packetList.InsertBefore(pkt, e)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
} else {
|
||||
// Too far ahead, there are some missing sequence numbers, immediate NAK report
|
||||
// here we can prevent a possibly unnecessary NAK with SRTO_LOXXMAXTTL
|
||||
r.sendNAK(r.maxSeenSequenceNumber.Inc(), pkt.Header().PacketSequenceNumber.Dec())
|
||||
|
||||
len := uint64(pkt.Header().PacketSequenceNumber.Distance(r.maxSeenSequenceNumber))
|
||||
r.statistics.PktLoss += len
|
||||
r.statistics.ByteLoss += len * uint64(r.avgPayloadSize)
|
||||
|
||||
r.maxSeenSequenceNumber = pkt.Header().PacketSequenceNumber
|
||||
}
|
||||
|
||||
r.statistics.PktBuf++
|
||||
r.statistics.PktUnique++
|
||||
|
||||
r.statistics.ByteBuf += pktLen
|
||||
r.statistics.ByteUnique += pktLen
|
||||
|
||||
r.packetList.PushBack(pkt)
|
||||
}
|
||||
|
||||
// periodicACK determines the sequence number up to which all packets have
// been received contiguously, so that an ACK (possibly a "light" ACK) can
// be sent to the peer.
//
// It returns ok=true together with the sequence number to acknowledge
// (one past the last contiguously received packet) and whether the ACK
// should be a light ACK. now is a timestamp in microseconds on the same
// clock as the packets' PktTsbpdTime.
func (r *receiver) periodicACK(now uint64) (ok bool, sequenceNumber circular.Number, lite bool) {
	r.lock.Lock()
	defer r.lock.Unlock()

	// 4.8.1. Packet Acknowledgement (ACKs, ACKACKs)
	if now-r.lastPeriodicACK < r.periodicACKInterval {
		// The regular ACK interval hasn't elapsed yet. A "light" ACK is
		// sent instead if 64 or more packets arrived since the last ACK.
		if r.nPackets >= 64 {
			lite = true // Send light ACK
		} else {
			return
		}
	}

	minPktTsbpdTime, maxPktTsbpdTime := uint64(0), uint64(0)

	ackSequenceNumber := r.lastDeliveredSequenceNumber

	// Find the sequence number up until we have all in a row.
	// Where the first gap is (or at the end of the list) is where we can ACK to.

	e := r.packetList.Front()
	if e != nil {
		p := e.Value.(packet.Packet)

		minPktTsbpdTime = p.Header().PktTsbpdTime
		maxPktTsbpdTime = p.Header().PktTsbpdTime

		// If there are packets that should be delivered by now, move forward.
		if p.Header().PktTsbpdTime <= now {
			for e = e.Next(); e != nil; e = e.Next() {
				p = e.Value.(packet.Packet)

				if p.Header().PktTsbpdTime > now {
					break
				}
			}

			// p is now the packet the loop stopped at (the last one if the
			// list was exhausted, otherwise the first one not yet due).
			ackSequenceNumber = p.Header().PacketSequenceNumber
			maxPktTsbpdTime = p.Header().PktTsbpdTime

			// Advance p to the packet after the stop position, if any.
			if e != nil {
				if e = e.Next(); e != nil {
					p = e.Value.(packet.Packet)
				}
			}
		}

		// Extend the ACK range over all packets that follow contiguously
		// (i.e. each sequence number is exactly one more than the previous).
		if p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) {
			ackSequenceNumber = p.Header().PacketSequenceNumber

			for e = e.Next(); e != nil; e = e.Next() {
				p = e.Value.(packet.Packet)
				if !p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) {
					// First gap found; we can only ACK up to here.
					break
				}

				ackSequenceNumber = p.Header().PacketSequenceNumber
				maxPktTsbpdTime = p.Header().PktTsbpdTime
			}
		}

		ok = true
		sequenceNumber = ackSequenceNumber.Inc()

		// Keep track of the last ACK's sequence. with this we can faster ignore
		// packets that come in that have a lower sequence number.
		r.lastACKSequenceNumber = ackSequenceNumber
	}

	r.lastPeriodicACK = now
	r.nPackets = 0

	// Buffer fill level in milliseconds (timestamps are in microseconds).
	r.statistics.MsBuf = (maxPktTsbpdTime - minPktTsbpdTime) / 1_000

	return
}
|
||||
|
||||
func (r *receiver) periodicNAK(now uint64) (ok bool, from, to circular.Number) {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
|
||||
if now-r.lastPeriodicNAK < r.periodicNAKInterval {
|
||||
return
|
||||
}
|
||||
|
||||
// Send a periodic NAK
|
||||
|
||||
ackSequenceNumber := r.lastDeliveredSequenceNumber
|
||||
|
||||
// Send a NAK only for the first gap.
|
||||
// Alternatively send a NAK for max. X gaps because the size of the NAK packet is limited.
|
||||
for e := r.packetList.Front(); e != nil; e = e.Next() {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
if !p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) {
|
||||
nackSequenceNumber := ackSequenceNumber.Inc()
|
||||
|
||||
ok = true
|
||||
from = nackSequenceNumber
|
||||
to = p.Header().PacketSequenceNumber.Dec()
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
ackSequenceNumber = p.Header().PacketSequenceNumber
|
||||
}
|
||||
|
||||
r.lastPeriodicNAK = now
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *receiver) Tick(now uint64) {
|
||||
if ok, sequenceNumber, lite := r.periodicACK(now); ok {
|
||||
r.sendACK(sequenceNumber, lite)
|
||||
}
|
||||
|
||||
if ok, from, to := r.periodicNAK(now); ok {
|
||||
r.sendNAK(from, to)
|
||||
}
|
||||
|
||||
// Deliver packets whose PktTsbpdTime is ripe
|
||||
r.lock.Lock()
|
||||
removeList := make([]*list.Element, 0, r.packetList.Len())
|
||||
for e := r.packetList.Front(); e != nil; e = e.Next() {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
if p.Header().PacketSequenceNumber.Lte(r.lastACKSequenceNumber) && p.Header().PktTsbpdTime <= now {
|
||||
r.statistics.PktBuf--
|
||||
r.statistics.ByteBuf -= p.Len()
|
||||
|
||||
r.lastDeliveredSequenceNumber = p.Header().PacketSequenceNumber
|
||||
|
||||
r.deliver(p)
|
||||
removeList = append(removeList, e)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, e := range removeList {
|
||||
r.packetList.Remove(e)
|
||||
}
|
||||
r.lock.Unlock()
|
||||
|
||||
r.lock.Lock()
|
||||
tdiff := now - r.rate.last // microseconds
|
||||
|
||||
if tdiff > r.rate.period {
|
||||
r.rate.packetsPerSecond = float64(r.rate.packets) / (float64(tdiff) / 1000 / 1000)
|
||||
r.rate.bytesPerSecond = float64(r.rate.bytes) / (float64(tdiff) / 1000 / 1000)
|
||||
if r.rate.bytes != 0 {
|
||||
r.rate.pktLossRate = float64(r.rate.bytesRetrans) / float64(r.rate.bytes) * 100
|
||||
} else {
|
||||
r.rate.bytes = 0
|
||||
}
|
||||
|
||||
r.rate.packets = 0
|
||||
r.rate.bytes = 0
|
||||
r.rate.bytesRetrans = 0
|
||||
|
||||
r.rate.last = now
|
||||
}
|
||||
r.lock.Unlock()
|
||||
}
|
||||
|
||||
func (r *receiver) SetNAKInterval(nakInterval uint64) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.periodicNAKInterval = nakInterval
|
||||
}
|
||||
|
||||
func (r *receiver) String(t uint64) string {
|
||||
var b strings.Builder
|
||||
|
||||
b.WriteString(fmt.Sprintf("maxSeen=%d lastACK=%d lastDelivered=%d\n", r.maxSeenSequenceNumber.Val(), r.lastACKSequenceNumber.Val(), r.lastDeliveredSequenceNumber.Val()))
|
||||
|
||||
r.lock.RLock()
|
||||
for e := r.packetList.Front(); e != nil; e = e.Next() {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
b.WriteString(fmt.Sprintf(" %d @ %d (in %d)\n", p.Header().PacketSequenceNumber.Val(), p.Header().PktTsbpdTime, int64(p.Header().PktTsbpdTime)-int64(t)))
|
||||
}
|
||||
r.lock.RUnlock()
|
||||
|
||||
return b.String()
|
||||
}
|
||||
320
vendor/github.com/datarhei/gosrt/congestion/live/send.go
generated
vendored
Normal file
320
vendor/github.com/datarhei/gosrt/congestion/live/send.go
generated
vendored
Normal file
@ -0,0 +1,320 @@
|
||||
package live
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/gosrt/circular"
|
||||
"github.com/datarhei/gosrt/congestion"
|
||||
"github.com/datarhei/gosrt/packet"
|
||||
)
|
||||
|
||||
// SendConfig is the configuration for the liveSend congestion control
type SendConfig struct {
	InitialSequenceNumber circular.Number // sequence number assigned to the first pushed packet
	DropThreshold         uint64          // age after which unacknowledged packets are dropped (same clock as PktTsbpdTime)
	MaxBW                 int64           // maximum bandwidth, bytes/s
	InputBW               int64           // input bandwidth, bytes/s
	MinInputBW            int64           // minimum input bandwidth, bytes/s (not read by NewSender)
	OverheadBW            int64           // overhead bandwidth, percent
	OnDeliver             func(p packet.Packet) // callback invoked for each packet that is to be sent on the wire
}
|
||||
|
||||
// sender implements the Sender interface
type sender struct {
	nextSequenceNumber circular.Number // sequence number for the next pushed packet
	dropThreshold      uint64          // age after which unacknowledged packets are dropped

	packetList *list.List   // packets queued for sending, ordered by send time
	lossList   *list.List   // sent packets retained for possible retransmission until ACK'd
	lock       sync.RWMutex // guards both lists and the mutable fields below

	avgPayloadSize float64 // bytes
	pktSndPeriod   float64 // microseconds
	maxBW          float64 // bytes/s
	inputBW        float64 // bytes/s
	overheadBW     float64 // percent

	statistics congestion.SendStats // accumulated send statistics

	probeTime uint64 // PktTsbpdTime of the last probe (every 16th) packet

	// rate holds the state for the periodic bandwidth estimation.
	rate struct {
		period uint64 // microseconds
		last   uint64 // timestamp of the last estimation

		bytes        uint64 // bytes pushed since the last estimation
		bytesSent    uint64 // bytes sent since the last estimation
		bytesRetrans uint64 // bytes retransmitted since the last estimation

		estimatedInputBW float64 // bytes/s
		estimatedSentBW  float64 // bytes/s

		pktLossRate float64 // percent of sent bytes that were retransmissions
	}

	deliver func(p packet.Packet) // callback that actually sends a packet
}
|
||||
|
||||
// NewSender takes a SendConfig and returns a new Sender
|
||||
func NewSender(config SendConfig) congestion.Sender {
|
||||
s := &sender{
|
||||
nextSequenceNumber: config.InitialSequenceNumber,
|
||||
dropThreshold: config.DropThreshold,
|
||||
packetList: list.New(),
|
||||
lossList: list.New(),
|
||||
|
||||
avgPayloadSize: packet.MAX_PAYLOAD_SIZE, // 5.1.2. SRT's Default LiveCC Algorithm
|
||||
maxBW: float64(config.MaxBW),
|
||||
inputBW: float64(config.InputBW),
|
||||
overheadBW: float64(config.OverheadBW),
|
||||
|
||||
deliver: config.OnDeliver,
|
||||
}
|
||||
|
||||
if s.deliver == nil {
|
||||
s.deliver = func(p packet.Packet) {}
|
||||
}
|
||||
|
||||
s.maxBW = 128 * 1024 * 1024 // 1 Gbit/s
|
||||
s.pktSndPeriod = (s.avgPayloadSize + 16) * 1_000_000 / s.maxBW
|
||||
|
||||
s.rate.period = uint64(time.Second.Microseconds())
|
||||
s.rate.last = 0
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *sender) Stats() congestion.SendStats {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
s.statistics.UsPktSndPeriod = s.pktSndPeriod
|
||||
s.statistics.BytePayload = uint64(s.avgPayloadSize)
|
||||
s.statistics.MsBuf = 0
|
||||
|
||||
max := s.lossList.Back()
|
||||
min := s.lossList.Front()
|
||||
|
||||
if max != nil && min != nil {
|
||||
s.statistics.MsBuf = (max.Value.(packet.Packet).Header().PktTsbpdTime - min.Value.(packet.Packet).Header().PktTsbpdTime) / 1_000
|
||||
}
|
||||
|
||||
s.statistics.MbpsEstimatedInputBandwidth = s.rate.estimatedInputBW * 8 / 1024 / 1024
|
||||
s.statistics.MbpsEstimatedSentBandwidth = s.rate.estimatedSentBW * 8 / 1024 / 1024
|
||||
|
||||
s.statistics.PktLossRate = s.rate.pktLossRate
|
||||
|
||||
return s.statistics
|
||||
}
|
||||
|
||||
func (s *sender) Flush() {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
s.packetList = s.packetList.Init()
|
||||
s.lossList = s.lossList.Init()
|
||||
}
|
||||
|
||||
func (s *sender) Push(p packet.Packet) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
if p == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Give to the packet a sequence number
|
||||
p.Header().PacketSequenceNumber = s.nextSequenceNumber
|
||||
p.Header().PacketPositionFlag = packet.SinglePacket
|
||||
p.Header().OrderFlag = false
|
||||
p.Header().MessageNumber = 1
|
||||
|
||||
s.nextSequenceNumber = s.nextSequenceNumber.Inc()
|
||||
|
||||
pktLen := p.Len()
|
||||
|
||||
s.statistics.PktBuf++
|
||||
s.statistics.ByteBuf += pktLen
|
||||
|
||||
// Input bandwidth calculation
|
||||
s.rate.bytes += pktLen
|
||||
|
||||
p.Header().Timestamp = uint32(p.Header().PktTsbpdTime & uint64(packet.MAX_TIMESTAMP))
|
||||
|
||||
// Every 16th and 17th packet should be sent at the same time in order
|
||||
// for the receiver to determine the link capacity. Not really well
|
||||
// documented in the specs.
|
||||
// PktTsbpdTime is used for the timing of sending the packets. Here we
|
||||
// can modify it because it has already been used to set the packet's
|
||||
// timestamp.
|
||||
probe := p.Header().PacketSequenceNumber.Val() & 0xF
|
||||
if probe == 0 {
|
||||
s.probeTime = p.Header().PktTsbpdTime
|
||||
} else if probe == 1 {
|
||||
p.Header().PktTsbpdTime = s.probeTime
|
||||
}
|
||||
|
||||
s.packetList.PushBack(p)
|
||||
|
||||
s.statistics.PktFlightSize = uint64(s.packetList.Len())
|
||||
}
|
||||
|
||||
// Tick drives the sender: it delivers all queued packets whose send time
// (PktTsbpdTime) has been reached, drops packets from the loss list that
// exceed the drop threshold, and periodically updates the bandwidth
// estimates. now is a timestamp in microseconds.
func (s *sender) Tick(now uint64) {
	// Deliver packets whose PktTsbpdTime is ripe
	s.lock.Lock()
	removeList := make([]*list.Element, 0, s.packetList.Len())
	for e := s.packetList.Front(); e != nil; e = e.Next() {
		p := e.Value.(packet.Packet)
		if p.Header().PktTsbpdTime <= now {
			s.statistics.Pkt++
			s.statistics.PktUnique++

			pktLen := p.Len()

			s.statistics.Byte += pktLen
			s.statistics.ByteUnique += pktLen

			s.statistics.UsSndDuration += uint64(s.pktSndPeriod)

			// 5.1.2. SRT's Default LiveCC Algorithm
			s.avgPayloadSize = 0.875*s.avgPayloadSize + 0.125*float64(pktLen)

			s.rate.bytesSent += pktLen

			s.deliver(p)
			removeList = append(removeList, e)
		} else {
			// The list is ordered by send time; stop at the first unripe packet.
			break
		}
	}

	// Sent packets move to the loss list, where they are retained for
	// possible retransmission until ACK'd or too old.
	for _, e := range removeList {
		s.lossList.PushBack(e.Value)
		s.packetList.Remove(e)
	}
	s.lock.Unlock()

	s.lock.Lock()
	removeList = make([]*list.Element, 0, s.lossList.Len())
	for e := s.lossList.Front(); e != nil; e = e.Next() {
		p := e.Value.(packet.Packet)

		if p.Header().PktTsbpdTime+s.dropThreshold <= now {
			// Dropped packet because too old
			s.statistics.PktDrop++
			s.statistics.PktLoss++
			s.statistics.ByteDrop += p.Len()
			s.statistics.ByteLoss += p.Len()

			removeList = append(removeList, e)
		}
	}

	// These packets are not needed anymore (too late)
	for _, e := range removeList {
		p := e.Value.(packet.Packet)

		s.statistics.PktBuf--
		s.statistics.ByteBuf -= p.Len()

		s.lossList.Remove(e)

		// This packet is too old and we don't need it anymore
		p.Decommission()
	}
	s.lock.Unlock()

	// Periodically recompute the bandwidth estimates.
	s.lock.Lock()
	tdiff := now - s.rate.last

	if tdiff > s.rate.period {
		s.rate.estimatedInputBW = float64(s.rate.bytes) / (float64(tdiff) / 1000 / 1000)
		s.rate.estimatedSentBW = float64(s.rate.bytesSent) / (float64(tdiff) / 1000 / 1000)
		if s.rate.bytesSent != 0 {
			s.rate.pktLossRate = float64(s.rate.bytesRetrans) / float64(s.rate.bytesSent) * 100
		} else {
			s.rate.pktLossRate = 0
		}

		s.rate.bytes = 0
		s.rate.bytesSent = 0
		s.rate.bytesRetrans = 0

		s.rate.last = now
	}
	s.lock.Unlock()
}
|
||||
|
||||
func (s *sender) ACK(sequenceNumber circular.Number) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
removeList := make([]*list.Element, 0, s.lossList.Len())
|
||||
for e := s.lossList.Front(); e != nil; e = e.Next() {
|
||||
p := e.Value.(packet.Packet)
|
||||
if p.Header().PacketSequenceNumber.Lt(sequenceNumber) {
|
||||
// Remove packet from buffer because it has been successfully transmitted
|
||||
removeList = append(removeList, e)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// These packets are not needed anymore (ACK'd)
|
||||
for _, e := range removeList {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
s.statistics.PktBuf--
|
||||
s.statistics.ByteBuf -= p.Len()
|
||||
|
||||
s.lossList.Remove(e)
|
||||
|
||||
// This packet has been ACK'd and we don't need it anymore
|
||||
p.Decommission()
|
||||
}
|
||||
|
||||
s.pktSndPeriod = (s.avgPayloadSize + 16) * 1000000 / s.maxBW
|
||||
}
|
||||
|
||||
func (s *sender) NAK(sequenceNumbers []circular.Number) {
|
||||
if len(sequenceNumbers) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
for e := s.lossList.Back(); e != nil; e = e.Prev() {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
for i := 0; i < len(sequenceNumbers); i += 2 {
|
||||
if p.Header().PacketSequenceNumber.Gte(sequenceNumbers[i]) && p.Header().PacketSequenceNumber.Lte(sequenceNumbers[i+1]) {
|
||||
s.statistics.PktRetrans++
|
||||
s.statistics.Pkt++
|
||||
s.statistics.PktLoss++
|
||||
|
||||
s.statistics.ByteRetrans += p.Len()
|
||||
s.statistics.Byte += p.Len()
|
||||
s.statistics.ByteLoss += p.Len()
|
||||
|
||||
// 5.1.2. SRT's Default LiveCC Algorithm
|
||||
s.avgPayloadSize = 0.875*s.avgPayloadSize + 0.125*float64(p.Len())
|
||||
|
||||
s.rate.bytesSent += p.Len()
|
||||
s.rate.bytesRetrans += p.Len()
|
||||
|
||||
p.Header().RetransmittedPacketFlag = true
|
||||
s.deliver(p)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *sender) SetDropThreshold(threshold uint64) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
s.dropThreshold = threshold
|
||||
}
|
||||
32
vendor/github.com/datarhei/gosrt/connection.go
generated
vendored
32
vendor/github.com/datarhei/gosrt/connection.go
generated
vendored
@ -11,10 +11,11 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/gosrt/internal/circular"
|
||||
"github.com/datarhei/gosrt/internal/congestion"
|
||||
"github.com/datarhei/gosrt/internal/crypto"
|
||||
"github.com/datarhei/gosrt/internal/packet"
|
||||
"github.com/datarhei/gosrt/circular"
|
||||
"github.com/datarhei/gosrt/congestion"
|
||||
"github.com/datarhei/gosrt/congestion/live"
|
||||
"github.com/datarhei/gosrt/crypto"
|
||||
"github.com/datarhei/gosrt/packet"
|
||||
)
|
||||
|
||||
// Conn is a SRT network connection.
|
||||
@ -24,11 +25,20 @@ type Conn interface {
|
||||
// time limit; see SetDeadline and SetReadDeadline.
|
||||
Read(p []byte) (int, error)
|
||||
|
||||
// ReadPacket reads a packet from the queue of received packets. It blocks
|
||||
// if the queue is empty. Only data packets are returned. Using ReadPacket
|
||||
// and Read at the same time may lead to data loss.
|
||||
ReadPacket() (packet.Packet, error)
|
||||
|
||||
// Write writes data to the connection.
|
||||
// Write can be made to time out and return an error after a fixed
|
||||
// time limit; see SetDeadline and SetWriteDeadline.
|
||||
Write(p []byte) (int, error)
|
||||
|
||||
// WritePacket writes a packet to the write queue. Packets on the write queue
|
||||
// will be sent to the peer of the connection. Only data packets will be sent.
|
||||
WritePacket(p packet.Packet) error
|
||||
|
||||
// Close closes the connection.
|
||||
// Any blocked Read or Write operations will be unblocked and return errors.
|
||||
Close() error
|
||||
@ -252,7 +262,7 @@ func newSRTConn(config srtConnConfig) *srtConn {
|
||||
|
||||
// 4.8.1. Packet Acknowledgement (ACKs, ACKACKs) -> periodicACK = 10 milliseconds
|
||||
// 4.8.2. Packet Retransmission (NAKs) -> periodicNAK at least 20 milliseconds
|
||||
c.recv = congestion.NewLiveReceive(congestion.ReceiveConfig{
|
||||
c.recv = live.NewReceiver(live.ReceiveConfig{
|
||||
InitialSequenceNumber: c.initialPacketSequenceNumber,
|
||||
PeriodicACKInterval: 10_000,
|
||||
PeriodicNAKInterval: 20_000,
|
||||
@ -269,7 +279,7 @@ func newSRTConn(config srtConnConfig) *srtConn {
|
||||
}
|
||||
c.dropThreshold += 20_000
|
||||
|
||||
c.snd = congestion.NewLiveSend(congestion.SendConfig{
|
||||
c.snd = live.NewSender(live.SendConfig{
|
||||
InitialSequenceNumber: c.initialPacketSequenceNumber,
|
||||
DropThreshold: c.dropThreshold,
|
||||
MaxBW: c.config.MaxBW,
|
||||
@ -366,9 +376,7 @@ func (c *srtConn) ticker(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
// readPacket reads a packet from the queue of received packets. It blocks
|
||||
// if the queue is empty. Only data packets are returned.
|
||||
func (c *srtConn) readPacket() (packet.Packet, error) {
|
||||
func (c *srtConn) ReadPacket() (packet.Packet, error) {
|
||||
var p packet.Packet
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
@ -399,7 +407,7 @@ func (c *srtConn) Read(b []byte) (int, error) {
|
||||
|
||||
c.readBuffer.Reset()
|
||||
|
||||
p, err := c.readPacket()
|
||||
p, err := c.ReadPacket()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -412,9 +420,9 @@ func (c *srtConn) Read(b []byte) (int, error) {
|
||||
return c.readBuffer.Read(b)
|
||||
}
|
||||
|
||||
// writePacket writes a packet to the write queue. Packets on the write queue
|
||||
// WritePacket writes a packet to the write queue. Packets on the write queue
|
||||
// will be sent to the peer of the connection. Only data packets will be sent.
|
||||
func (c *srtConn) writePacket(p packet.Packet) error {
|
||||
func (c *srtConn) WritePacket(p packet.Packet) error {
|
||||
if p.Header().IsControlPacket {
|
||||
// Ignore control packets
|
||||
return nil
|
||||
|
||||
@ -4,13 +4,13 @@ package crypto
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/datarhei/gosrt/internal/packet"
|
||||
"github.com/datarhei/gosrt/packet"
|
||||
"github.com/datarhei/gosrt/rand"
|
||||
|
||||
"github.com/benburkert/openpgp/aes/keywrap"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
16
vendor/github.com/datarhei/gosrt/dial.go
generated
vendored
16
vendor/github.com/datarhei/gosrt/dial.go
generated
vendored
@ -11,10 +11,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/gosrt/internal/circular"
|
||||
"github.com/datarhei/gosrt/internal/crypto"
|
||||
"github.com/datarhei/gosrt/internal/packet"
|
||||
"github.com/datarhei/gosrt/internal/rand"
|
||||
"github.com/datarhei/gosrt/circular"
|
||||
"github.com/datarhei/gosrt/crypto"
|
||||
"github.com/datarhei/gosrt/packet"
|
||||
"github.com/datarhei/gosrt/rand"
|
||||
)
|
||||
|
||||
// ErrClientClosed is returned when the client connection has
|
||||
@ -701,7 +701,7 @@ func (dl *dialer) Read(p []byte) (n int, err error) {
|
||||
return dl.conn.Read(p)
|
||||
}
|
||||
|
||||
func (dl *dialer) readPacket() (packet.Packet, error) {
|
||||
func (dl *dialer) ReadPacket() (packet.Packet, error) {
|
||||
if err := dl.checkConnection(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -713,7 +713,7 @@ func (dl *dialer) readPacket() (packet.Packet, error) {
|
||||
return nil, fmt.Errorf("no connection")
|
||||
}
|
||||
|
||||
return dl.conn.readPacket()
|
||||
return dl.conn.ReadPacket()
|
||||
}
|
||||
|
||||
func (dl *dialer) Write(p []byte) (n int, err error) {
|
||||
@ -731,7 +731,7 @@ func (dl *dialer) Write(p []byte) (n int, err error) {
|
||||
return dl.conn.Write(p)
|
||||
}
|
||||
|
||||
func (dl *dialer) writePacket(p packet.Packet) error {
|
||||
func (dl *dialer) WritePacket(p packet.Packet) error {
|
||||
if err := dl.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -743,7 +743,7 @@ func (dl *dialer) writePacket(p packet.Packet) error {
|
||||
return fmt.Errorf("no connection")
|
||||
}
|
||||
|
||||
return dl.conn.writePacket(p)
|
||||
return dl.conn.WritePacket(p)
|
||||
}
|
||||
|
||||
func (dl *dialer) SetDeadline(t time.Time) error { return dl.conn.SetDeadline(t) }
|
||||
|
||||
117
vendor/github.com/datarhei/gosrt/internal/congestion/congestion.go
generated
vendored
117
vendor/github.com/datarhei/gosrt/internal/congestion/congestion.go
generated
vendored
@ -1,117 +0,0 @@
|
||||
// Package congestions provides congestion control implementations for SRT
|
||||
package congestion
|
||||
|
||||
import (
|
||||
"github.com/datarhei/gosrt/internal/circular"
|
||||
"github.com/datarhei/gosrt/internal/packet"
|
||||
)
|
||||
|
||||
// SendConfig is the configuration for the liveSend congestion control
|
||||
type SendConfig struct {
|
||||
InitialSequenceNumber circular.Number
|
||||
DropThreshold uint64
|
||||
MaxBW int64
|
||||
InputBW int64
|
||||
MinInputBW int64
|
||||
OverheadBW int64
|
||||
OnDeliver func(p packet.Packet)
|
||||
}
|
||||
|
||||
// Sender is the sending part of the congestion control
|
||||
type Sender interface {
|
||||
Stats() SendStats
|
||||
Flush()
|
||||
Push(p packet.Packet)
|
||||
Tick(now uint64)
|
||||
ACK(sequenceNumber circular.Number)
|
||||
NAK(sequenceNumbers []circular.Number)
|
||||
SetDropThreshold(threshold uint64)
|
||||
}
|
||||
|
||||
// ReceiveConfig is the configuration for the liveResv congestion control
|
||||
type ReceiveConfig struct {
|
||||
InitialSequenceNumber circular.Number
|
||||
PeriodicACKInterval uint64 // microseconds
|
||||
PeriodicNAKInterval uint64 // microseconds
|
||||
OnSendACK func(seq circular.Number, light bool)
|
||||
OnSendNAK func(from, to circular.Number)
|
||||
OnDeliver func(p packet.Packet)
|
||||
}
|
||||
|
||||
// Receiver is the receiving part of the congestion control
|
||||
type Receiver interface {
|
||||
Stats() ReceiveStats
|
||||
PacketRate() (pps, bps, capacity float64)
|
||||
Flush()
|
||||
Push(pkt packet.Packet)
|
||||
Tick(now uint64)
|
||||
SetNAKInterval(nakInterval uint64)
|
||||
}
|
||||
|
||||
// SendStats are collected statistics from liveSend
|
||||
type SendStats struct {
|
||||
Pkt uint64 // Sent packets in total
|
||||
Byte uint64 // Sent bytes in total
|
||||
|
||||
PktUnique uint64
|
||||
ByteUnique uint64
|
||||
|
||||
PktLoss uint64
|
||||
ByteLoss uint64
|
||||
|
||||
PktRetrans uint64
|
||||
ByteRetrans uint64
|
||||
|
||||
UsSndDuration uint64 // microseconds
|
||||
|
||||
PktDrop uint64
|
||||
ByteDrop uint64
|
||||
|
||||
// instantaneous
|
||||
PktBuf uint64
|
||||
ByteBuf uint64
|
||||
MsBuf uint64
|
||||
|
||||
PktFlightSize uint64
|
||||
|
||||
UsPktSndPeriod float64 // microseconds
|
||||
BytePayload uint64
|
||||
|
||||
MbpsEstimatedInputBandwidth float64
|
||||
MbpsEstimatedSentBandwidth float64
|
||||
|
||||
PktLossRate float64
|
||||
}
|
||||
|
||||
// ReceiveStats are collected statistics from liveRecv
|
||||
type ReceiveStats struct {
|
||||
Pkt uint64
|
||||
Byte uint64
|
||||
|
||||
PktUnique uint64
|
||||
ByteUnique uint64
|
||||
|
||||
PktLoss uint64
|
||||
ByteLoss uint64
|
||||
|
||||
PktRetrans uint64
|
||||
ByteRetrans uint64
|
||||
|
||||
PktBelated uint64
|
||||
ByteBelated uint64
|
||||
|
||||
PktDrop uint64
|
||||
ByteDrop uint64
|
||||
|
||||
// instantaneous
|
||||
PktBuf uint64
|
||||
ByteBuf uint64
|
||||
MsBuf uint64
|
||||
|
||||
BytePayload uint64
|
||||
|
||||
MbpsEstimatedRecvBandwidth float64
|
||||
MbpsEstimatedLinkCapacity float64
|
||||
|
||||
PktLossRate float64
|
||||
}
|
||||
900
vendor/github.com/datarhei/gosrt/internal/congestion/live.go
generated
vendored
900
vendor/github.com/datarhei/gosrt/internal/congestion/live.go
generated
vendored
@ -1,900 +0,0 @@
|
||||
package congestion
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/gosrt/internal/circular"
|
||||
"github.com/datarhei/gosrt/internal/packet"
|
||||
)
|
||||
|
||||
// liveSend implements the Sender interface
|
||||
type liveSend struct {
|
||||
nextSequenceNumber circular.Number
|
||||
dropThreshold uint64
|
||||
|
||||
packetList *list.List
|
||||
lossList *list.List
|
||||
lock sync.RWMutex
|
||||
|
||||
avgPayloadSize float64 // bytes
|
||||
pktSndPeriod float64 // microseconds
|
||||
maxBW float64 // bytes/s
|
||||
inputBW float64 // bytes/s
|
||||
overheadBW float64 // percent
|
||||
|
||||
statistics SendStats
|
||||
|
||||
probeTime uint64
|
||||
|
||||
rate struct {
|
||||
period uint64 // microseconds
|
||||
last uint64
|
||||
|
||||
bytes uint64
|
||||
bytesSent uint64
|
||||
bytesRetrans uint64
|
||||
|
||||
estimatedInputBW float64 // bytes/s
|
||||
estimatedSentBW float64 // bytes/s
|
||||
|
||||
pktLossRate float64
|
||||
}
|
||||
|
||||
deliver func(p packet.Packet)
|
||||
}
|
||||
|
||||
// NewLiveSend takes a SendConfig and returns a new Sender
|
||||
func NewLiveSend(config SendConfig) Sender {
|
||||
s := &liveSend{
|
||||
nextSequenceNumber: config.InitialSequenceNumber,
|
||||
dropThreshold: config.DropThreshold,
|
||||
packetList: list.New(),
|
||||
lossList: list.New(),
|
||||
|
||||
avgPayloadSize: packet.MAX_PAYLOAD_SIZE, // 5.1.2. SRT's Default LiveCC Algorithm
|
||||
maxBW: float64(config.MaxBW),
|
||||
inputBW: float64(config.InputBW),
|
||||
overheadBW: float64(config.OverheadBW),
|
||||
|
||||
deliver: config.OnDeliver,
|
||||
}
|
||||
|
||||
if s.deliver == nil {
|
||||
s.deliver = func(p packet.Packet) {}
|
||||
}
|
||||
|
||||
s.maxBW = 128 * 1024 * 1024 // 1 Gbit/s
|
||||
s.pktSndPeriod = (s.avgPayloadSize + 16) * 1_000_000 / s.maxBW
|
||||
|
||||
s.rate.period = uint64(time.Second.Microseconds())
|
||||
s.rate.last = 0
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *liveSend) Stats() SendStats {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
s.statistics.UsPktSndPeriod = s.pktSndPeriod
|
||||
s.statistics.BytePayload = uint64(s.avgPayloadSize)
|
||||
s.statistics.MsBuf = 0
|
||||
|
||||
max := s.lossList.Back()
|
||||
min := s.lossList.Front()
|
||||
|
||||
if max != nil && min != nil {
|
||||
s.statistics.MsBuf = (max.Value.(packet.Packet).Header().PktTsbpdTime - min.Value.(packet.Packet).Header().PktTsbpdTime) / 1_000
|
||||
}
|
||||
|
||||
s.statistics.MbpsEstimatedInputBandwidth = s.rate.estimatedInputBW * 8 / 1024 / 1024
|
||||
s.statistics.MbpsEstimatedSentBandwidth = s.rate.estimatedSentBW * 8 / 1024 / 1024
|
||||
|
||||
s.statistics.PktLossRate = s.rate.pktLossRate
|
||||
|
||||
return s.statistics
|
||||
}
|
||||
|
||||
func (s *liveSend) Flush() {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
s.packetList = s.packetList.Init()
|
||||
s.lossList = s.lossList.Init()
|
||||
}
|
||||
|
||||
func (s *liveSend) Push(p packet.Packet) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
if p == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Give to the packet a sequence number
|
||||
p.Header().PacketSequenceNumber = s.nextSequenceNumber
|
||||
p.Header().PacketPositionFlag = packet.SinglePacket
|
||||
p.Header().OrderFlag = false
|
||||
p.Header().MessageNumber = 1
|
||||
|
||||
s.nextSequenceNumber = s.nextSequenceNumber.Inc()
|
||||
|
||||
pktLen := p.Len()
|
||||
|
||||
s.statistics.PktBuf++
|
||||
s.statistics.ByteBuf += pktLen
|
||||
|
||||
// Input bandwidth calculation
|
||||
s.rate.bytes += pktLen
|
||||
|
||||
p.Header().Timestamp = uint32(p.Header().PktTsbpdTime & uint64(packet.MAX_TIMESTAMP))
|
||||
|
||||
// Every 16th and 17th packet should be sent at the same time in order
|
||||
// for the receiver to determine the link capacity. Not really well
|
||||
// documented in the specs.
|
||||
// PktTsbpdTime is used for the timing of sending the packets. Here we
|
||||
// can modify it because it has already been used to set the packet's
|
||||
// timestamp.
|
||||
probe := p.Header().PacketSequenceNumber.Val() & 0xF
|
||||
if probe == 0 {
|
||||
s.probeTime = p.Header().PktTsbpdTime
|
||||
} else if probe == 1 {
|
||||
p.Header().PktTsbpdTime = s.probeTime
|
||||
}
|
||||
|
||||
s.packetList.PushBack(p)
|
||||
|
||||
s.statistics.PktFlightSize = uint64(s.packetList.Len())
|
||||
}
|
||||
|
||||
// Tick runs the periodic send work for time now (in microseconds): it
// delivers all buffered packets whose PktTsbpdTime is due, drops packets
// from the loss list that are older than the drop threshold, and refreshes
// the bandwidth estimates once per rate period.
func (s *liveSend) Tick(now uint64) {
	// Deliver packets whose PktTsbpdTime is ripe
	s.lock.Lock()
	removeList := make([]*list.Element, 0, s.packetList.Len())
	for e := s.packetList.Front(); e != nil; e = e.Next() {
		p := e.Value.(packet.Packet)
		if p.Header().PktTsbpdTime <= now {
			s.statistics.Pkt++
			s.statistics.PktUnique++

			pktLen := p.Len()

			s.statistics.Byte += pktLen
			s.statistics.ByteUnique += pktLen

			s.statistics.UsSndDuration += uint64(s.pktSndPeriod)

			// 5.1.2. SRT's Default LiveCC Algorithm
			s.avgPayloadSize = 0.875*s.avgPayloadSize + 0.125*float64(pktLen)

			s.rate.bytesSent += pktLen

			s.deliver(p)
			removeList = append(removeList, e)
		} else {
			// The list is ordered by delivery time; nothing further is due.
			break
		}
	}

	// Move the delivered packets to the loss list, where they are retained
	// until ACK'd in case the receiver requests a retransmission.
	for _, e := range removeList {
		s.lossList.PushBack(e.Value)
		s.packetList.Remove(e)
	}
	s.lock.Unlock()

	s.lock.Lock()
	removeList = make([]*list.Element, 0, s.lossList.Len())
	for e := s.lossList.Front(); e != nil; e = e.Next() {
		p := e.Value.(packet.Packet)

		if p.Header().PktTsbpdTime+s.dropThreshold <= now {
			// Dropped packet because too old
			s.statistics.PktDrop++
			s.statistics.PktLoss++
			s.statistics.ByteDrop += p.Len()
			s.statistics.ByteLoss += p.Len()

			removeList = append(removeList, e)
		}
	}

	// These packets are not needed anymore (too late)
	for _, e := range removeList {
		p := e.Value.(packet.Packet)

		s.statistics.PktBuf--
		s.statistics.ByteBuf -= p.Len()

		s.lossList.Remove(e)

		// The packet is dropped; release its payload.
		p.Decommission()
	}
	s.lock.Unlock()

	s.lock.Lock()
	tdiff := now - s.rate.last

	if tdiff > s.rate.period {
		// tdiff is in microseconds, so divide twice by 1000 for per-second rates.
		s.rate.estimatedInputBW = float64(s.rate.bytes) / (float64(tdiff) / 1000 / 1000)
		s.rate.estimatedSentBW = float64(s.rate.bytesSent) / (float64(tdiff) / 1000 / 1000)
		if s.rate.bytesSent != 0 {
			s.rate.pktLossRate = float64(s.rate.bytesRetrans) / float64(s.rate.bytesSent) * 100
		} else {
			s.rate.pktLossRate = 0
		}

		s.rate.bytes = 0
		s.rate.bytesSent = 0
		s.rate.bytesRetrans = 0

		s.rate.last = now
	}
	s.lock.Unlock()
}
|
||||
|
||||
// ACK handles an acknowledgement from the receiver. All packets with a
// sequence number strictly lower than sequenceNumber are removed from the
// loss list, and the send period is recalculated from the average payload
// size and the configured maximum bandwidth.
func (s *liveSend) ACK(sequenceNumber circular.Number) {
	s.lock.Lock()
	defer s.lock.Unlock()

	removeList := make([]*list.Element, 0, s.lossList.Len())
	for e := s.lossList.Front(); e != nil; e = e.Next() {
		p := e.Value.(packet.Packet)
		if p.Header().PacketSequenceNumber.Lt(sequenceNumber) {
			// Remove packet from buffer because it has been successfully transmitted
			removeList = append(removeList, e)
		} else {
			// The list is ordered by sequence number; the rest is newer.
			break
		}
	}

	// These packets are not needed anymore (ACK'd)
	for _, e := range removeList {
		p := e.Value.(packet.Packet)

		s.statistics.PktBuf--
		s.statistics.ByteBuf -= p.Len()

		s.lossList.Remove(e)

		// This packet has been ACK'd and we don't need it anymore
		p.Decommission()
	}

	// 5.1.2. SRT's Default LiveCC Algorithm: recompute the inter-packet
	// send period from the smoothed payload size and the max bandwidth.
	s.pktSndPeriod = (s.avgPayloadSize + 16) * 1000000 / s.maxBW
}
|
||||
|
||||
// NAK handles a loss report from the receiver. sequenceNumbers holds
// consecutive [from, to] pairs describing lost ranges; every packet in the
// loss list that falls into one of those ranges is retransmitted.
// NOTE(review): an odd number of entries would index past the end at i+1 —
// presumably the caller always supplies complete pairs; confirm.
func (s *liveSend) NAK(sequenceNumbers []circular.Number) {
	if len(sequenceNumbers) == 0 {
		return
	}

	s.lock.Lock()
	defer s.lock.Unlock()

	// Walk the loss list from the newest to the oldest packet.
	for e := s.lossList.Back(); e != nil; e = e.Prev() {
		p := e.Value.(packet.Packet)

		// Check the packet against each [from, to] range.
		for i := 0; i < len(sequenceNumbers); i += 2 {
			if p.Header().PacketSequenceNumber.Gte(sequenceNumbers[i]) && p.Header().PacketSequenceNumber.Lte(sequenceNumbers[i+1]) {
				s.statistics.PktRetrans++
				s.statistics.Pkt++
				s.statistics.PktLoss++

				s.statistics.ByteRetrans += p.Len()
				s.statistics.Byte += p.Len()
				s.statistics.ByteLoss += p.Len()

				// 5.1.2. SRT's Default LiveCC Algorithm
				s.avgPayloadSize = 0.875*s.avgPayloadSize + 0.125*float64(p.Len())

				s.rate.bytesSent += p.Len()
				s.rate.bytesRetrans += p.Len()

				// Mark the packet as a retransmission and send it again.
				p.Header().RetransmittedPacketFlag = true
				s.deliver(p)
			}
		}
	}
}
|
||||
|
||||
func (s *liveSend) SetDropThreshold(threshold uint64) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
s.dropThreshold = threshold
|
||||
}
|
||||
|
||||
// liveReceive implements the Receiver interface
type liveReceive struct {
	maxSeenSequenceNumber       circular.Number // highest sequence number seen so far
	lastACKSequenceNumber       circular.Number // sequence number acknowledged by the last full ACK
	lastDeliveredSequenceNumber circular.Number // sequence number of the last packet handed to deliver
	packetList                  *list.List      // receive buffer, ordered by sequence number
	lock                        sync.RWMutex

	nPackets uint // packets received since the last periodic ACK; >= 64 triggers a light ACK

	periodicACKInterval uint64 // config
	periodicNAKInterval uint64 // config

	lastPeriodicACK uint64 // time of the last periodic ACK
	lastPeriodicNAK uint64 // time of the last periodic NAK

	avgPayloadSize  float64 // bytes
	avgLinkCapacity float64 // packets per second

	probeTime    time.Time       // arrival time of the first packet of a probe pair (seq & 0xF == 0)
	probeNextSeq circular.Number // expected sequence number of the probe pair's second packet

	statistics ReceiveStats

	rate struct {
		last   uint64 // microseconds
		period uint64 // how often the rates are recalculated, in microseconds

		// Counters accumulated since "last"; reset after each recalculation.
		packets      uint64
		bytes        uint64
		bytesRetrans uint64

		packetsPerSecond float64
		bytesPerSecond   float64

		pktLossRate float64
	}

	// Callbacks; never nil after NewLiveReceive.
	sendACK func(seq circular.Number, light bool)
	sendNAK func(from, to circular.Number)
	deliver func(p packet.Packet)
}
|
||||
|
||||
// NewLiveReceive takes a ReceiveConfig and returns a new Receiver
|
||||
func NewLiveReceive(config ReceiveConfig) Receiver {
|
||||
r := &liveReceive{
|
||||
maxSeenSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
lastACKSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
lastDeliveredSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
packetList: list.New(),
|
||||
|
||||
periodicACKInterval: config.PeriodicACKInterval,
|
||||
periodicNAKInterval: config.PeriodicNAKInterval,
|
||||
|
||||
avgPayloadSize: 1456, // 5.1.2. SRT's Default LiveCC Algorithm
|
||||
|
||||
sendACK: config.OnSendACK,
|
||||
sendNAK: config.OnSendNAK,
|
||||
deliver: config.OnDeliver,
|
||||
}
|
||||
|
||||
if r.sendACK == nil {
|
||||
r.sendACK = func(seq circular.Number, light bool) {}
|
||||
}
|
||||
|
||||
if r.sendNAK == nil {
|
||||
r.sendNAK = func(from, to circular.Number) {}
|
||||
}
|
||||
|
||||
if r.deliver == nil {
|
||||
r.deliver = func(p packet.Packet) {}
|
||||
}
|
||||
|
||||
r.rate.last = 0
|
||||
r.rate.period = uint64(time.Second.Microseconds())
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *liveReceive) Stats() ReceiveStats {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.statistics.BytePayload = uint64(r.avgPayloadSize)
|
||||
r.statistics.MbpsEstimatedRecvBandwidth = r.rate.bytesPerSecond * 8 / 1024 / 1024
|
||||
r.statistics.MbpsEstimatedLinkCapacity = r.avgLinkCapacity * packet.MAX_PAYLOAD_SIZE * 8 / 1024 / 1024
|
||||
r.statistics.PktLossRate = r.rate.pktLossRate
|
||||
|
||||
return r.statistics
|
||||
}
|
||||
|
||||
func (r *liveReceive) PacketRate() (pps, bps, capacity float64) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
pps = r.rate.packetsPerSecond
|
||||
bps = r.rate.bytesPerSecond
|
||||
capacity = r.avgLinkCapacity
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *liveReceive) Flush() {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.packetList = r.packetList.Init()
|
||||
}
|
||||
|
||||
func (r *liveReceive) Push(pkt packet.Packet) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
if pkt == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// This is not really well (not at all) described in the specs. See core.cpp and window.h
|
||||
// and search for PUMASK_SEQNO_PROBE (0xF). Every 16th and 17th packet are
|
||||
// sent in pairs. This is used as a probe for the theoretical capacity of the link.
|
||||
if !pkt.Header().RetransmittedPacketFlag {
|
||||
probe := pkt.Header().PacketSequenceNumber.Val() & 0xF
|
||||
if probe == 0 {
|
||||
r.probeTime = time.Now()
|
||||
r.probeNextSeq = pkt.Header().PacketSequenceNumber.Inc()
|
||||
} else if probe == 1 && pkt.Header().PacketSequenceNumber.Equals(r.probeNextSeq) && !r.probeTime.IsZero() && pkt.Len() != 0 {
|
||||
// The time between packets scaled to a fully loaded packet
|
||||
diff := float64(time.Since(r.probeTime).Microseconds()) * (packet.MAX_PAYLOAD_SIZE / float64(pkt.Len()))
|
||||
if diff != 0 {
|
||||
// Here we're doing an average of the measurements.
|
||||
r.avgLinkCapacity = 0.875*r.avgLinkCapacity + 0.125*1_000_000/diff
|
||||
}
|
||||
} else {
|
||||
r.probeTime = time.Time{}
|
||||
}
|
||||
} else {
|
||||
r.probeTime = time.Time{}
|
||||
}
|
||||
|
||||
r.nPackets++
|
||||
|
||||
pktLen := pkt.Len()
|
||||
|
||||
r.rate.packets++
|
||||
r.rate.bytes += pktLen
|
||||
|
||||
r.statistics.Pkt++
|
||||
r.statistics.Byte += pktLen
|
||||
|
||||
//pkt.PktTsbpdTime = pkt.Timestamp + r.delay
|
||||
if pkt.Header().RetransmittedPacketFlag {
|
||||
r.statistics.PktRetrans++
|
||||
r.statistics.ByteRetrans += pktLen
|
||||
|
||||
r.rate.bytesRetrans += pktLen
|
||||
}
|
||||
|
||||
// 5.1.2. SRT's Default LiveCC Algorithm
|
||||
r.avgPayloadSize = 0.875*r.avgPayloadSize + 0.125*float64(pktLen)
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lte(r.lastDeliveredSequenceNumber) {
|
||||
// Too old, because up until r.lastDeliveredSequenceNumber, we already delivered
|
||||
r.statistics.PktBelated++
|
||||
r.statistics.ByteBelated += pktLen
|
||||
|
||||
r.statistics.PktDrop++
|
||||
r.statistics.ByteDrop += pktLen
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lt(r.lastACKSequenceNumber) {
|
||||
// Already acknowledged, ignoring
|
||||
r.statistics.PktDrop++
|
||||
r.statistics.ByteDrop += pktLen
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Equals(r.maxSeenSequenceNumber.Inc()) {
|
||||
// In order, the packet we expected
|
||||
r.maxSeenSequenceNumber = pkt.Header().PacketSequenceNumber
|
||||
} else if pkt.Header().PacketSequenceNumber.Lte(r.maxSeenSequenceNumber) {
|
||||
// Out of order, is it a missing piece? put it in the correct position
|
||||
for e := r.packetList.Front(); e != nil; e = e.Next() {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
if p.Header().PacketSequenceNumber == pkt.Header().PacketSequenceNumber {
|
||||
// Already received (has been sent more than once), ignoring
|
||||
r.statistics.PktDrop++
|
||||
r.statistics.ByteDrop += pktLen
|
||||
|
||||
break
|
||||
} else if p.Header().PacketSequenceNumber.Gt(pkt.Header().PacketSequenceNumber) {
|
||||
// Late arrival, this fills a gap
|
||||
r.statistics.PktBuf++
|
||||
r.statistics.PktUnique++
|
||||
|
||||
r.statistics.ByteBuf += pktLen
|
||||
r.statistics.ByteUnique += pktLen
|
||||
|
||||
r.packetList.InsertBefore(pkt, e)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
} else {
|
||||
// Too far ahead, there are some missing sequence numbers, immediate NAK report
|
||||
// here we can prevent a possibly unnecessary NAK with SRTO_LOXXMAXTTL
|
||||
r.sendNAK(r.maxSeenSequenceNumber.Inc(), pkt.Header().PacketSequenceNumber.Dec())
|
||||
|
||||
len := uint64(pkt.Header().PacketSequenceNumber.Distance(r.maxSeenSequenceNumber))
|
||||
r.statistics.PktLoss += len
|
||||
r.statistics.ByteLoss += len * uint64(r.avgPayloadSize)
|
||||
|
||||
r.maxSeenSequenceNumber = pkt.Header().PacketSequenceNumber
|
||||
}
|
||||
|
||||
r.statistics.PktBuf++
|
||||
r.statistics.PktUnique++
|
||||
|
||||
r.statistics.ByteBuf += pktLen
|
||||
r.statistics.ByteUnique += pktLen
|
||||
|
||||
r.packetList.PushBack(pkt)
|
||||
}
|
||||
|
||||
// periodicACK determines whether an ACK is due at time now and, if so,
// the sequence number to acknowledge. A full ACK is sent once per
// periodicACKInterval; a light ACK is sent earlier when at least 64
// packets arrived since the last ACK. The acknowledged range extends up
// to the first gap in the receive buffer.
func (r *liveReceive) periodicACK(now uint64) (ok bool, sequenceNumber circular.Number, lite bool) {
	r.lock.Lock()
	defer r.lock.Unlock()

	// 4.8.1. Packet Acknowledgement (ACKs, ACKACKs)
	if now-r.lastPeriodicACK < r.periodicACKInterval {
		if r.nPackets >= 64 {
			lite = true // Send light ACK
		} else {
			return
		}
	}

	minPktTsbpdTime, maxPktTsbpdTime := uint64(0), uint64(0)

	ackSequenceNumber := r.lastDeliveredSequenceNumber

	// Find the sequence number up until we have all in a row.
	// Where the first gap is (or at the end of the list) is where we can ACK to.

	e := r.packetList.Front()
	if e != nil {
		p := e.Value.(packet.Packet)

		minPktTsbpdTime = p.Header().PktTsbpdTime
		maxPktTsbpdTime = p.Header().PktTsbpdTime

		// If there are packets that should be delivered by now, move foward.
		if p.Header().PktTsbpdTime <= now {
			for e = e.Next(); e != nil; e = e.Next() {
				p = e.Value.(packet.Packet)

				if p.Header().PktTsbpdTime > now {
					break
				}
			}

			// NOTE(review): when the loop breaks, p is the first packet that
			// is not yet due; its sequence number becomes the ACK base —
			// confirm this is the intended behavior.
			ackSequenceNumber = p.Header().PacketSequenceNumber
			maxPktTsbpdTime = p.Header().PktTsbpdTime

			// Advance to the packet after the ACK base, if any.
			if e != nil {
				if e = e.Next(); e != nil {
					p = e.Value.(packet.Packet)
				}
			}
		}

		// Extend the ACK over every packet that continues the run without a gap.
		if p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) {
			ackSequenceNumber = p.Header().PacketSequenceNumber

			for e = e.Next(); e != nil; e = e.Next() {
				p = e.Value.(packet.Packet)
				if !p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) {
					break
				}

				ackSequenceNumber = p.Header().PacketSequenceNumber
				maxPktTsbpdTime = p.Header().PktTsbpdTime
			}
		}

		ok = true
		sequenceNumber = ackSequenceNumber.Inc()

		// Keep track of the last ACK's sequence. with this we can faster ignore
		// packets that come in that have a lower sequence number.
		r.lastACKSequenceNumber = ackSequenceNumber
	}

	r.lastPeriodicACK = now
	r.nPackets = 0

	// Buffered time span in milliseconds.
	r.statistics.MsBuf = (maxPktTsbpdTime - minPktTsbpdTime) / 1_000

	return
}
|
||||
|
||||
func (r *liveReceive) periodicNAK(now uint64) (ok bool, from, to circular.Number) {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
|
||||
if now-r.lastPeriodicNAK < r.periodicNAKInterval {
|
||||
return
|
||||
}
|
||||
|
||||
// Send a periodic NAK
|
||||
|
||||
ackSequenceNumber := r.lastDeliveredSequenceNumber
|
||||
|
||||
// Send a NAK only for the first gap.
|
||||
// Alternatively send a NAK for max. X gaps because the size of the NAK packet is limited.
|
||||
for e := r.packetList.Front(); e != nil; e = e.Next() {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
if !p.Header().PacketSequenceNumber.Equals(ackSequenceNumber.Inc()) {
|
||||
nackSequenceNumber := ackSequenceNumber.Inc()
|
||||
|
||||
ok = true
|
||||
from = nackSequenceNumber
|
||||
to = p.Header().PacketSequenceNumber.Dec()
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
ackSequenceNumber = p.Header().PacketSequenceNumber
|
||||
}
|
||||
|
||||
r.lastPeriodicNAK = now
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *liveReceive) Tick(now uint64) {
|
||||
if ok, sequenceNumber, lite := r.periodicACK(now); ok {
|
||||
r.sendACK(sequenceNumber, lite)
|
||||
}
|
||||
|
||||
if ok, from, to := r.periodicNAK(now); ok {
|
||||
r.sendNAK(from, to)
|
||||
}
|
||||
|
||||
// Deliver packets whose PktTsbpdTime is ripe
|
||||
r.lock.Lock()
|
||||
removeList := make([]*list.Element, 0, r.packetList.Len())
|
||||
for e := r.packetList.Front(); e != nil; e = e.Next() {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
if p.Header().PacketSequenceNumber.Lte(r.lastACKSequenceNumber) && p.Header().PktTsbpdTime <= now {
|
||||
r.statistics.PktBuf--
|
||||
r.statistics.ByteBuf -= p.Len()
|
||||
|
||||
r.lastDeliveredSequenceNumber = p.Header().PacketSequenceNumber
|
||||
|
||||
r.deliver(p)
|
||||
removeList = append(removeList, e)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, e := range removeList {
|
||||
r.packetList.Remove(e)
|
||||
}
|
||||
r.lock.Unlock()
|
||||
|
||||
r.lock.Lock()
|
||||
tdiff := now - r.rate.last // microseconds
|
||||
|
||||
if tdiff > r.rate.period {
|
||||
r.rate.packetsPerSecond = float64(r.rate.packets) / (float64(tdiff) / 1000 / 1000)
|
||||
r.rate.bytesPerSecond = float64(r.rate.bytes) / (float64(tdiff) / 1000 / 1000)
|
||||
if r.rate.bytes != 0 {
|
||||
r.rate.pktLossRate = float64(r.rate.bytesRetrans) / float64(r.rate.bytes) * 100
|
||||
} else {
|
||||
r.rate.bytes = 0
|
||||
}
|
||||
|
||||
r.rate.packets = 0
|
||||
r.rate.bytes = 0
|
||||
r.rate.bytesRetrans = 0
|
||||
|
||||
r.rate.last = now
|
||||
}
|
||||
r.lock.Unlock()
|
||||
}
|
||||
|
||||
func (r *liveReceive) SetNAKInterval(nakInterval uint64) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.periodicNAKInterval = nakInterval
|
||||
}
|
||||
|
||||
func (r *liveReceive) String(t uint64) string {
|
||||
var b strings.Builder
|
||||
|
||||
b.WriteString(fmt.Sprintf("maxSeen=%d lastACK=%d lastDelivered=%d\n", r.maxSeenSequenceNumber.Val(), r.lastACKSequenceNumber.Val(), r.lastDeliveredSequenceNumber.Val()))
|
||||
|
||||
r.lock.RLock()
|
||||
for e := r.packetList.Front(); e != nil; e = e.Next() {
|
||||
p := e.Value.(packet.Packet)
|
||||
|
||||
b.WriteString(fmt.Sprintf(" %d @ %d (in %d)\n", p.Header().PacketSequenceNumber.Val(), p.Header().PktTsbpdTime, int64(p.Header().PktTsbpdTime)-int64(t)))
|
||||
}
|
||||
r.lock.RUnlock()
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// fakeLiveReceive is a trivial implementation of the Receiver interface.
// It tracks counters and the highest seen sequence number but keeps no
// receive buffer and never sends NAKs.
type fakeLiveReceive struct {
	maxSeenSequenceNumber       circular.Number // highest sequence number seen so far
	lastACKSequenceNumber       circular.Number // sequence number acknowledged by the last ACK
	lastDeliveredSequenceNumber circular.Number // sequence number considered delivered

	nPackets uint // packets received since the last periodic ACK; >= 64 triggers a light ACK

	periodicACKInterval uint64 // config
	periodicNAKInterval uint64 // config

	lastPeriodicACK uint64 // time of the last periodic ACK

	avgPayloadSize float64 // bytes

	rate struct {
		last   time.Time
		period time.Duration

		// Counters accumulated since "last"; reset after each recalculation.
		packets uint64
		bytes   uint64

		pps float64
		bps float64
	}

	// Callbacks; never nil after NewFakeLiveReceive.
	sendACK func(seq circular.Number, light bool)
	sendNAK func(from, to circular.Number)
	deliver func(p packet.Packet)

	lock sync.RWMutex
}
|
||||
|
||||
func NewFakeLiveReceive(config ReceiveConfig) Receiver {
|
||||
r := &fakeLiveReceive{
|
||||
maxSeenSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
lastACKSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
lastDeliveredSequenceNumber: config.InitialSequenceNumber.Dec(),
|
||||
|
||||
periodicACKInterval: config.PeriodicACKInterval,
|
||||
periodicNAKInterval: config.PeriodicNAKInterval,
|
||||
|
||||
avgPayloadSize: 1456, // 5.1.2. SRT's Default LiveCC Algorithm
|
||||
|
||||
sendACK: config.OnSendACK,
|
||||
sendNAK: config.OnSendNAK,
|
||||
deliver: config.OnDeliver,
|
||||
}
|
||||
|
||||
if r.sendACK == nil {
|
||||
r.sendACK = func(seq circular.Number, light bool) {}
|
||||
}
|
||||
|
||||
if r.sendNAK == nil {
|
||||
r.sendNAK = func(from, to circular.Number) {}
|
||||
}
|
||||
|
||||
if r.deliver == nil {
|
||||
r.deliver = func(p packet.Packet) {}
|
||||
}
|
||||
|
||||
r.rate.last = time.Now()
|
||||
r.rate.period = time.Second
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// Stats returns empty statistics; the fake receiver does not collect any.
func (r *fakeLiveReceive) Stats() ReceiveStats {
	var stats ReceiveStats
	return stats
}
|
||||
func (r *fakeLiveReceive) PacketRate() (pps, bps, capacity float64) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
tdiff := time.Since(r.rate.last)
|
||||
|
||||
if tdiff < r.rate.period {
|
||||
pps = r.rate.pps
|
||||
bps = r.rate.bps
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
r.rate.pps = float64(r.rate.packets) / tdiff.Seconds()
|
||||
r.rate.bps = float64(r.rate.bytes) / tdiff.Seconds()
|
||||
|
||||
r.rate.packets, r.rate.bytes = 0, 0
|
||||
r.rate.last = time.Now()
|
||||
|
||||
pps = r.rate.pps
|
||||
bps = r.rate.bps
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Flush is a no-op; the fake receiver holds no packet buffer to clear.
func (r *fakeLiveReceive) Flush() {}
|
||||
|
||||
func (r *fakeLiveReceive) Push(pkt packet.Packet) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
if pkt == nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.nPackets++
|
||||
|
||||
pktLen := pkt.Len()
|
||||
|
||||
r.rate.packets++
|
||||
r.rate.bytes += pktLen
|
||||
|
||||
// 5.1.2. SRT's Default LiveCC Algorithm
|
||||
r.avgPayloadSize = 0.875*r.avgPayloadSize + 0.125*float64(pktLen)
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lte(r.lastDeliveredSequenceNumber) {
|
||||
// Too old, because up until r.lastDeliveredSequenceNumber, we already delivered
|
||||
return
|
||||
}
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lt(r.lastACKSequenceNumber) {
|
||||
// Already acknowledged, ignoring
|
||||
return
|
||||
}
|
||||
|
||||
if pkt.Header().PacketSequenceNumber.Lte(r.maxSeenSequenceNumber) {
|
||||
return
|
||||
}
|
||||
|
||||
r.maxSeenSequenceNumber = pkt.Header().PacketSequenceNumber
|
||||
}
|
||||
|
||||
func (r *fakeLiveReceive) periodicACK(now uint64) (ok bool, sequenceNumber circular.Number, lite bool) {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
|
||||
// 4.8.1. Packet Acknowledgement (ACKs, ACKACKs)
|
||||
if now-r.lastPeriodicACK < r.periodicACKInterval {
|
||||
if r.nPackets >= 64 {
|
||||
lite = true // Send light ACK
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ok = true
|
||||
sequenceNumber = r.maxSeenSequenceNumber.Inc()
|
||||
|
||||
r.lastACKSequenceNumber = r.maxSeenSequenceNumber
|
||||
|
||||
r.lastPeriodicACK = now
|
||||
r.nPackets = 0
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *fakeLiveReceive) Tick(now uint64) {
|
||||
if ok, sequenceNumber, lite := r.periodicACK(now); ok {
|
||||
r.sendACK(sequenceNumber, lite)
|
||||
}
|
||||
|
||||
// Deliver packets whose PktTsbpdTime is ripe
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.lastDeliveredSequenceNumber = r.lastACKSequenceNumber
|
||||
}
|
||||
|
||||
func (r *fakeLiveReceive) SetNAKInterval(nakInterval uint64) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.periodicNAKInterval = nakInterval
|
||||
}
|
||||
6
vendor/github.com/datarhei/gosrt/listen.go
generated
vendored
6
vendor/github.com/datarhei/gosrt/listen.go
generated
vendored
@ -10,9 +10,9 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/gosrt/internal/crypto"
|
||||
srtnet "github.com/datarhei/gosrt/internal/net"
|
||||
"github.com/datarhei/gosrt/internal/packet"
|
||||
"github.com/datarhei/gosrt/crypto"
|
||||
srtnet "github.com/datarhei/gosrt/net"
|
||||
"github.com/datarhei/gosrt/packet"
|
||||
)
|
||||
|
||||
// ConnType represents the kind of connection as returned
|
||||
|
||||
@ -6,9 +6,10 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/gosrt/internal/rand"
|
||||
"github.com/datarhei/gosrt/rand"
|
||||
)
|
||||
|
||||
// SYNCookie implements a syn cookie for the SRT handshake.
|
||||
type SYNCookie struct {
|
||||
secret1 string
|
||||
secret2 string
|
||||
@ -20,6 +21,7 @@ func defaultCounter() int64 {
|
||||
return time.Now().Unix() >> 6
|
||||
}
|
||||
|
||||
// NewSYNCookie returns a SYNCookie for a destination address.
|
||||
func NewSYNCookie(daddr string, counter func() int64) (*SYNCookie, error) {
|
||||
s := &SYNCookie{
|
||||
daddr: daddr,
|
||||
@ -44,10 +46,12 @@ func NewSYNCookie(daddr string, counter func() int64) (*SYNCookie, error) {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Get returns the current syn cookie with a sender address.
|
||||
func (s *SYNCookie) Get(saddr string) uint32 {
|
||||
return s.calculate(s.counter(), saddr)
|
||||
}
|
||||
|
||||
// Verify verfies that two syn cookies relate.
|
||||
func (s *SYNCookie) Verify(cookie uint32, saddr string) bool {
|
||||
counter := s.counter()
|
||||
|
||||
@ -13,8 +13,8 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/datarhei/gosrt/internal/circular"
|
||||
srtnet "github.com/datarhei/gosrt/internal/net"
|
||||
"github.com/datarhei/gosrt/circular"
|
||||
srtnet "github.com/datarhei/gosrt/net"
|
||||
)
|
||||
|
||||
const MAX_SEQUENCENUMBER uint32 = 0b01111111_11111111_11111111_11111111
|
||||
@ -175,17 +175,44 @@ func (h CtrlSubType) Value() uint16 {
|
||||
}
|
||||
|
||||
type Packet interface {
|
||||
// String returns a string representation of the packet.
|
||||
String() string
|
||||
|
||||
// Clone clones a packet.
|
||||
Clone() Packet
|
||||
|
||||
// Header returns a pointer to the packet header.
|
||||
Header() *PacketHeader
|
||||
|
||||
// Data returns the payload the packets holds. The packets stays the
|
||||
// owner of the data, i.e. modifying the returned data will also
|
||||
// modify the payload.
|
||||
Data() []byte
|
||||
|
||||
// SetData replaces the payload of the packet with the provided one.
|
||||
SetData([]byte)
|
||||
|
||||
// Len return the length of the payload in the packet.
|
||||
Len() uint64
|
||||
Unmarshal(data []byte) error
|
||||
|
||||
// Marshal writes the bytes representation of the packet to the provided writer.
|
||||
Marshal(w io.Writer) error
|
||||
|
||||
// Unmarshal parses the given data into the packet header and its payload. Returns an error on failure.
|
||||
Unmarshal(data []byte) error
|
||||
|
||||
// Dump returns the same as String with an additional hex-dump of the marshalled packet.
|
||||
Dump() string
|
||||
|
||||
// MarshalCIF writes the byte representation of a control information field as payload
|
||||
// of the packet. Only for control packets.
|
||||
MarshalCIF(c CIF)
|
||||
|
||||
// UnmarshalCIF parses the payload into a control information field struct. Returns an error
|
||||
// on failure.
|
||||
UnmarshalCIF(c CIF) error
|
||||
|
||||
// Decommission frees the payload. The packet shouldn't be uses afterwards.
|
||||
Decommission()
|
||||
}
|
||||
|
||||
@ -437,14 +464,21 @@ func (p *pkt) UnmarshalCIF(c CIF) error {
|
||||
return c.Unmarshal(p.payload.Bytes())
|
||||
}
|
||||
|
||||
// CIF reepresents a control information field
|
||||
type CIF interface {
|
||||
// Marshal writes a byte representation of the CIF to the provided writer.
|
||||
Marshal(w io.Writer)
|
||||
|
||||
// Unmarshal parses the provided bytes into the CIF. Returns a non nil error of failure.
|
||||
Unmarshal(data []byte) error
|
||||
|
||||
// String returns a string representation of the CIF.
|
||||
String() string
|
||||
}
|
||||
|
||||
// 3.2.1. Handshake
|
||||
|
||||
// CIFHandshake represents the SRT handshake messages.
|
||||
type CIFHandshake struct {
|
||||
IsRequest bool
|
||||
|
||||
@ -742,6 +776,8 @@ func (c *CIFHandshake) Marshal(w io.Writer) {
|
||||
}
|
||||
|
||||
// 3.2.1.1.1. Handshake Extension Message Flags
|
||||
|
||||
// CIFHandshakeExtensionFlags represents the Handshake Extension Message Flags
|
||||
type CIFHandshakeExtensionFlags struct {
|
||||
TSBPDSND bool // Defines if the TSBPD mechanism (Section 4.5) will be used for sending.
|
||||
TSBPDRCV bool // Defines if the TSBPD mechanism (Section 4.5) will be used for receiving.
|
||||
@ -755,6 +791,7 @@ type CIFHandshakeExtensionFlags struct {
|
||||
|
||||
// 3.2.1.1. Handshake Extension Message
|
||||
|
||||
// CIFHandshakeExtension represents the Handshake Extension Message
|
||||
type CIFHandshakeExtension struct {
|
||||
SRTVersion uint32
|
||||
SRTFlags CIFHandshakeExtensionFlags
|
||||
@ -864,6 +901,8 @@ const (
|
||||
KM_BADSECRET uint32 = 4
|
||||
)
|
||||
|
||||
// CIFKeyMaterialExtension represents the Key Material message. It is used as part of
|
||||
// the v5 handshake or on its own after a v4 handshake.
|
||||
type CIFKeyMaterialExtension struct {
|
||||
Error uint32
|
||||
S uint8 // This is a fixed-width field that is reserved for future usage. value = {0}
|
||||
@ -1054,6 +1093,7 @@ func (c *CIFKeyMaterialExtension) Marshal(w io.Writer) {
|
||||
|
||||
// 3.2.4. ACK (Acknowledgment)
|
||||
|
||||
// CIFACK represents an ACK message.
|
||||
type CIFACK struct {
|
||||
IsLite bool
|
||||
IsSmall bool
|
||||
@ -1156,6 +1196,7 @@ func (c *CIFACK) Marshal(w io.Writer) {
|
||||
|
||||
// 3.2.5. NAK (Loss Report)
|
||||
|
||||
// CIFNAK represents a NAK message
|
||||
type CIFNAK struct {
|
||||
LostPacketSequenceNumber []circular.Number
|
||||
}
|
||||
@ -1248,6 +1289,7 @@ func (c *CIFNAK) Marshal(w io.Writer) {
|
||||
|
||||
// 3.2.7. Shutdown
|
||||
|
||||
// CIFShutdown represents a shutdown message.
|
||||
type CIFShutdown struct{}
|
||||
|
||||
func (c CIFShutdown) String() string {
|
||||
23
vendor/github.com/datarhei/gosrt/pubsub.go
generated
vendored
23
vendor/github.com/datarhei/gosrt/pubsub.go
generated
vendored
@ -6,7 +6,7 @@ import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/datarhei/gosrt/internal/packet"
|
||||
"github.com/datarhei/gosrt/packet"
|
||||
)
|
||||
|
||||
// PubSub is a publish/subscriber service for SRT connections.
|
||||
@ -23,11 +23,6 @@ type PubSub interface {
|
||||
Subscribe(c Conn) error
|
||||
}
|
||||
|
||||
type packetReadWriter interface {
|
||||
readPacket() (packet.Packet, error)
|
||||
writePacket(p packet.Packet) error
|
||||
}
|
||||
|
||||
// pubSub is an implementation of the PubSub interface
|
||||
type pubSub struct {
|
||||
incoming chan packet.Packet
|
||||
@ -107,12 +102,6 @@ func (pb *pubSub) Publish(c Conn) error {
|
||||
|
||||
var p packet.Packet
|
||||
var err error
|
||||
conn, ok := c.(packetReadWriter)
|
||||
if !ok {
|
||||
err := fmt.Errorf("the provided connection is not a SRT connection")
|
||||
pb.logger.Print("pubsub:error", 0, 1, func() string { return err.Error() })
|
||||
return err
|
||||
}
|
||||
|
||||
socketId := c.SocketId()
|
||||
|
||||
@ -121,7 +110,7 @@ func (pb *pubSub) Publish(c Conn) error {
|
||||
pb.publish = true
|
||||
|
||||
for {
|
||||
p, err = conn.readPacket()
|
||||
p, err = c.ReadPacket()
|
||||
if err != nil {
|
||||
pb.logger.Print("pubsub:error", socketId, 1, func() string { return err.Error() })
|
||||
break
|
||||
@ -142,12 +131,6 @@ func (pb *pubSub) Publish(c Conn) error {
|
||||
func (pb *pubSub) Subscribe(c Conn) error {
|
||||
l := make(chan packet.Packet, 1024)
|
||||
socketId := c.SocketId()
|
||||
conn, ok := c.(packetReadWriter)
|
||||
if !ok {
|
||||
err := fmt.Errorf("the provided connection is not a SRT connection")
|
||||
pb.logger.Print("pubsub:error", 0, 1, func() string { return err.Error() })
|
||||
return err
|
||||
}
|
||||
|
||||
pb.logger.Print("pubsub:subscribe", socketId, 1, func() string { return "new subscriber" })
|
||||
|
||||
@ -166,7 +149,7 @@ func (pb *pubSub) Subscribe(c Conn) error {
|
||||
case <-pb.ctx.Done():
|
||||
return io.EOF
|
||||
case p := <-l:
|
||||
err := conn.writePacket(p)
|
||||
err := c.WritePacket(p)
|
||||
p.Decommission()
|
||||
if err != nil {
|
||||
pb.logger.Print("pubsub:error", socketId, 1, func() string { return err.Error() })
|
||||
|
||||
@ -18,6 +18,10 @@ func RandomString(length int, charset string) (string, error) {
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
func Read(b []byte) (int, error) {
|
||||
return rand.Read(b)
|
||||
}
|
||||
|
||||
func Uint32() (uint32, error) {
|
||||
var b [4]byte
|
||||
_, err := rand.Read(b[:])
|
||||
2
vendor/github.com/go-openapi/jsonpointer/pointer.go
generated
vendored
2
vendor/github.com/go-openapi/jsonpointer/pointer.go
generated
vendored
@ -264,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
|
||||
knd := reflect.ValueOf(node).Kind()
|
||||
|
||||
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
|
||||
return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
|
||||
return errors.New("only structs, pointers, maps and slices are supported for setting values")
|
||||
}
|
||||
|
||||
if nameProvider == nil {
|
||||
|
||||
23
vendor/github.com/go-openapi/spec/README.md
generated
vendored
23
vendor/github.com/go-openapi/spec/README.md
generated
vendored
@ -29,3 +29,26 @@ The object model for OpenAPI specification documents.
|
||||
> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
|
||||
>
|
||||
> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3
|
||||
|
||||
* Does the unmarshaling support YAML?
|
||||
|
||||
> Not directly. The exposed types know only how to unmarshal from JSON.
|
||||
>
|
||||
> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by
|
||||
> github.com/go-openapi/loads
|
||||
>
|
||||
> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec
|
||||
>
|
||||
> See also https://github.com/go-openapi/spec/issues/164
|
||||
|
||||
* How can I validate a spec?
|
||||
|
||||
> Validation is provided by [the validate package](http://github.com/go-openapi/validate)
|
||||
|
||||
* Why do we have an `ID` field for `Schema` which is not part of the swagger spec?
|
||||
|
||||
> We found jsonschema compatibility more important: since `id` in jsonschema influences
|
||||
> how `$ref` are resolved.
|
||||
> This `id` does not conflict with any property named `id`.
|
||||
>
|
||||
> See also https://github.com/go-openapi/spec/issues/23
|
||||
|
||||
2
vendor/github.com/go-openapi/spec/expander.go
generated
vendored
2
vendor/github.com/go-openapi/spec/expander.go
generated
vendored
@ -57,7 +57,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
|
||||
if !options.SkipSchemas {
|
||||
for key, definition := range spec.Definitions {
|
||||
parentRefs := make([]string, 0, 10)
|
||||
parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key))
|
||||
parentRefs = append(parentRefs, "#/definitions/"+key)
|
||||
|
||||
def, err := expandSchema(definition, parentRefs, resolver, specBasePath)
|
||||
if resolver.shouldStopOnError(err) {
|
||||
|
||||
2
vendor/github.com/go-openapi/swag/initialism_index.go
generated
vendored
2
vendor/github.com/go-openapi/swag/initialism_index.go
generated
vendored
@ -176,7 +176,7 @@ func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
|
||||
func (m *indexOfInitialisms) sorted() (result []string) {
|
||||
m.sortMutex.Lock()
|
||||
defer m.sortMutex.Unlock()
|
||||
m.index.Range(func(key, value interface{}) bool {
|
||||
m.index.Range(func(key, _ interface{}) bool {
|
||||
k := key.(string)
|
||||
result = append(result, k)
|
||||
return true
|
||||
|
||||
14
vendor/github.com/go-openapi/swag/string_bytes.go
generated
vendored
14
vendor/github.com/go-openapi/swag/string_bytes.go
generated
vendored
@ -2,21 +2,7 @@ package swag
|
||||
|
||||
import "unsafe"
|
||||
|
||||
type internalString struct {
|
||||
Data unsafe.Pointer
|
||||
Len int
|
||||
}
|
||||
|
||||
// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
|
||||
func hackStringBytes(str string) []byte {
|
||||
p := (*internalString)(unsafe.Pointer(&str)).Data
|
||||
return unsafe.Slice((*byte)(p), len(str))
|
||||
}
|
||||
|
||||
/*
|
||||
* go1.20 version (for when go mod moves to a go1.20 requirement):
|
||||
|
||||
func hackStringBytes(str string) []byte {
|
||||
return unsafe.Slice(unsafe.StringData(str), len(str))
|
||||
}
|
||||
*/
|
||||
|
||||
3
vendor/github.com/go-openapi/swag/yaml.go
generated
vendored
3
vendor/github.com/go-openapi/swag/yaml.go
generated
vendored
@ -16,6 +16,7 @@ package swag
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@ -50,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
|
||||
return nil, err
|
||||
}
|
||||
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
|
||||
return nil, fmt.Errorf("only YAML documents that are objects are supported")
|
||||
return nil, errors.New("only YAML documents that are objects are supported")
|
||||
}
|
||||
return &document, nil
|
||||
}
|
||||
|
||||
2
vendor/github.com/go-playground/validator/v10/README.md
generated
vendored
2
vendor/github.com/go-playground/validator/v10/README.md
generated
vendored
@ -1,7 +1,7 @@
|
||||
Package validator
|
||||
=================
|
||||
<img align="right" src="logo.png">[](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||

|
||||

|
||||
[](https://travis-ci.org/go-playground/validator)
|
||||
[](https://coveralls.io/github/go-playground/validator?branch=master)
|
||||
[](https://goreportcard.com/report/github.com/go-playground/validator)
|
||||
|
||||
2
vendor/github.com/go-playground/validator/v10/cache.go
generated
vendored
2
vendor/github.com/go-playground/validator/v10/cache.go
generated
vendored
@ -126,7 +126,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr
|
||||
|
||||
fld = typ.Field(i)
|
||||
|
||||
if !fld.Anonymous && len(fld.PkgPath) > 0 {
|
||||
if !v.privateFieldValidation && !fld.Anonymous && len(fld.PkgPath) > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
10
vendor/github.com/go-playground/validator/v10/options.go
generated
vendored
10
vendor/github.com/go-playground/validator/v10/options.go
generated
vendored
@ -14,3 +14,13 @@ func WithRequiredStructEnabled() Option {
|
||||
v.requiredStructEnabled = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithPrivateFieldValidation activates validation for unexported fields via the use of the `unsafe` package.
|
||||
//
|
||||
// By opting into this feature you are acknowledging that you are aware of the risks and accept any current or future
|
||||
// consequences of using this feature.
|
||||
func WithPrivateFieldValidation() Option {
|
||||
return func(v *Validate) {
|
||||
v.privateFieldValidation = true
|
||||
}
|
||||
}
|
||||
|
||||
32
vendor/github.com/go-playground/validator/v10/validator.go
generated
vendored
32
vendor/github.com/go-playground/validator/v10/validator.go
generated
vendored
@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// per validate construct
|
||||
@ -156,7 +157,7 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr
|
||||
structNs: v.str2,
|
||||
fieldLen: uint8(len(cf.altName)),
|
||||
structfieldLen: uint8(len(cf.name)),
|
||||
value: current.Interface(),
|
||||
value: getValue(current),
|
||||
param: ct.param,
|
||||
kind: kind,
|
||||
typ: current.Type(),
|
||||
@ -410,7 +411,7 @@ OUTER:
|
||||
structNs: v.str2,
|
||||
fieldLen: uint8(len(cf.altName)),
|
||||
structfieldLen: uint8(len(cf.name)),
|
||||
value: current.Interface(),
|
||||
value: getValue(current),
|
||||
param: ct.param,
|
||||
kind: kind,
|
||||
typ: typ,
|
||||
@ -430,7 +431,7 @@ OUTER:
|
||||
structNs: v.str2,
|
||||
fieldLen: uint8(len(cf.altName)),
|
||||
structfieldLen: uint8(len(cf.name)),
|
||||
value: current.Interface(),
|
||||
value: getValue(current),
|
||||
param: ct.param,
|
||||
kind: kind,
|
||||
typ: typ,
|
||||
@ -470,7 +471,7 @@ OUTER:
|
||||
structNs: v.str2,
|
||||
fieldLen: uint8(len(cf.altName)),
|
||||
structfieldLen: uint8(len(cf.name)),
|
||||
value: current.Interface(),
|
||||
value: getValue(current),
|
||||
param: ct.param,
|
||||
kind: kind,
|
||||
typ: typ,
|
||||
@ -484,3 +485,26 @@ OUTER:
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func getValue(val reflect.Value) interface{} {
|
||||
if val.CanInterface() {
|
||||
return val.Interface()
|
||||
}
|
||||
|
||||
if val.CanAddr() {
|
||||
return reflect.NewAt(val.Type(), unsafe.Pointer(val.UnsafeAddr())).Elem().Interface()
|
||||
}
|
||||
|
||||
switch val.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return val.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return val.Uint()
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return val.Complex()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return val.Float()
|
||||
default:
|
||||
return val.String()
|
||||
}
|
||||
}
|
||||
|
||||
1
vendor/github.com/go-playground/validator/v10/validator_instance.go
generated
vendored
1
vendor/github.com/go-playground/validator/v10/validator_instance.go
generated
vendored
@ -94,6 +94,7 @@ type Validate struct {
|
||||
hasCustomFuncs bool
|
||||
hasTagNameFunc bool
|
||||
requiredStructEnabled bool
|
||||
privateFieldValidation bool
|
||||
}
|
||||
|
||||
// New returns a new instance of 'validate' with sane defaults.
|
||||
|
||||
4
vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
generated
vendored
4
vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
generated
vendored
@ -62,7 +62,7 @@ func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interf
|
||||
case *ecdsa.PublicKey:
|
||||
ecdsaKey = k
|
||||
default:
|
||||
return newError("ECDSA verify expects *ecsda.PublicKey", ErrInvalidKeyType)
|
||||
return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType)
|
||||
}
|
||||
|
||||
if len(sig) != 2*m.KeySize {
|
||||
@ -96,7 +96,7 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte
|
||||
case *ecdsa.PrivateKey:
|
||||
ecdsaKey = k
|
||||
default:
|
||||
return nil, newError("ECDSA sign expects *ecsda.PrivateKey", ErrInvalidKeyType)
|
||||
return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType)
|
||||
}
|
||||
|
||||
// Create the hasher
|
||||
|
||||
4
vendor/github.com/golang-jwt/jwt/v5/hmac.go
generated
vendored
4
vendor/github.com/golang-jwt/jwt/v5/hmac.go
generated
vendored
@ -91,7 +91,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa
|
||||
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {
|
||||
if keyBytes, ok := key.([]byte); ok {
|
||||
if !m.Hash.Available() {
|
||||
return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
|
||||
return nil, ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := hmac.New(m.Hash.New, keyBytes)
|
||||
@ -100,5 +100,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte,
|
||||
return hasher.Sum(nil), nil
|
||||
}
|
||||
|
||||
return nil, ErrInvalidKeyType
|
||||
return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
|
||||
}
|
||||
|
||||
2
vendor/github.com/klauspost/compress/s2/writer.go
generated
vendored
2
vendor/github.com/klauspost/compress/s2/writer.go
generated
vendored
@ -937,7 +937,7 @@ func WriterUncompressed() WriterOption {
|
||||
|
||||
// WriterBlockSize allows to override the default block size.
|
||||
// Blocks will be this size or smaller.
|
||||
// Minimum size is 4KB and and maximum size is 4MB.
|
||||
// Minimum size is 4KB and maximum size is 4MB.
|
||||
//
|
||||
// Bigger blocks may give bigger throughput on systems with many cores,
|
||||
// and will increase compression slightly, but it will limit the possible
|
||||
|
||||
10
vendor/github.com/libdns/libdns/README.md
generated
vendored
10
vendor/github.com/libdns/libdns/README.md
generated
vendored
@ -41,14 +41,18 @@ recs, err := provider.GetRecords(ctx, zone)
|
||||
|
||||
// create records (AppendRecords is similar)
|
||||
newRecs, err := provider.SetRecords(ctx, zone, []libdns.Record{
|
||||
{
|
||||
Type: "A",
|
||||
Name: "sub",
|
||||
Value: "1.2.3.4",
|
||||
},
|
||||
})
|
||||
|
||||
// delete records (this example uses provider-assigned ID)
|
||||
deletedRecs, err := provider.DeleteRecords(ctx, zone, []libdns.Record{
|
||||
{
|
||||
ID: "foobar",
|
||||
},
|
||||
})
|
||||
|
||||
// no matter which provider you use, the code stays the same!
|
||||
@ -56,11 +60,11 @@ deletedRecs, err := provider.DeleteRecords(ctx, zone, []libdns.Record{
|
||||
```
|
||||
|
||||
|
||||
## Implementing new providers
|
||||
## Implementing new provider packages
|
||||
|
||||
Providers are 100% written and maintained by the community! We all maintain just the packages for providers we use.
|
||||
Provider packages are 100% written and maintained by the community! Collectively, we all maintain the packages for providers we individually use.
|
||||
|
||||
**[Instructions for adding new providers](https://github.com/libdns/libdns/wiki/Implementing-providers)** are on this repo's wiki. Please feel free to contribute.
|
||||
**[Instructions for adding new libdns packages](https://github.com/libdns/libdns/wiki/Implementing-a-libdns-package)** are on this repo's wiki. Please feel free to contribute yours!
|
||||
|
||||
|
||||
## Similar projects
|
||||
|
||||
106
vendor/github.com/libdns/libdns/libdns.go
generated
vendored
106
vendor/github.com/libdns/libdns/libdns.go
generated
vendored
@ -10,15 +10,18 @@
|
||||
// that input records conform to this standard, while also ensuring that
|
||||
// output records do; adjustments to record names may need to be made before
|
||||
// or after provider API calls, for example, to maintain consistency with
|
||||
// all other libdns provider implementations. Helper functions are available
|
||||
// in this package to convert between relative and absolute names.
|
||||
// all other libdns packages. Helper functions are available in this package
|
||||
// to convert between relative and absolute names.
|
||||
//
|
||||
// Although zone names are a required input, libdns does not coerce any
|
||||
// particular representation of DNS zones; only records. Since zone name and
|
||||
// records are separate inputs in libdns interfaces, it is up to the caller
|
||||
// to pair a zone's name with its records in a way that works for them.
|
||||
//
|
||||
// All interface implementations must be safe for concurrent/parallel use.
|
||||
// All interface implementations must be safe for concurrent/parallel use,
|
||||
// meaning 1) no data races, and 2) simultaneous method calls must result
|
||||
// in either both their expected outcomes or an error.
|
||||
//
|
||||
// For example, if AppendRecords() is called at the same time and two API
|
||||
// requests are made to the provider at the same time, the result of both
|
||||
// requests must be visible after they both complete; if the provider does
|
||||
@ -32,6 +35,8 @@ package libdns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@ -89,7 +94,23 @@ type RecordDeleter interface {
|
||||
DeleteRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
|
||||
}
|
||||
|
||||
// ZoneLister can list available DNS zones.
|
||||
type ZoneLister interface {
|
||||
// ListZones returns the list of available DNS zones for use by
|
||||
// other libdns methods.
|
||||
//
|
||||
// Implementations must honor context cancellation and be safe for
|
||||
// concurrent use.
|
||||
ListZones(ctx context.Context) ([]Zone, error)
|
||||
}
|
||||
|
||||
// Record is a generalized representation of a DNS record.
|
||||
//
|
||||
// The values of this struct should be free of zone-file-specific syntax,
|
||||
// except if this struct's fields do not sufficiently represent all the
|
||||
// fields of a certain record type; in that case, the remaining data for
|
||||
// which there are not specific fields should be stored in the Value as
|
||||
// it appears in the zone file.
|
||||
type Record struct {
|
||||
// provider-specific metadata
|
||||
ID string
|
||||
@ -101,7 +122,76 @@ type Record struct {
|
||||
TTL time.Duration
|
||||
|
||||
// type-dependent record fields
|
||||
Priority int // used by MX, SRV, and URI records
|
||||
Priority uint // HTTPS, MX, SRV, and URI records
|
||||
Weight uint // SRV and URI records
|
||||
}
|
||||
|
||||
// Zone is a generalized representation of a DNS zone.
|
||||
type Zone struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
// ToSRV parses the record into a SRV struct with fully-parsed, literal values.
|
||||
//
|
||||
// EXPERIMENTAL; subject to change or removal.
|
||||
func (r Record) ToSRV() (SRV, error) {
|
||||
if r.Type != "SRV" {
|
||||
return SRV{}, fmt.Errorf("record type not SRV: %s", r.Type)
|
||||
}
|
||||
|
||||
fields := strings.Fields(r.Value)
|
||||
if len(fields) != 2 {
|
||||
return SRV{}, fmt.Errorf("malformed SRV value; expected: '<port> <target>'")
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(fields[0])
|
||||
if err != nil {
|
||||
return SRV{}, fmt.Errorf("invalid port %s: %v", fields[0], err)
|
||||
}
|
||||
if port < 0 {
|
||||
return SRV{}, fmt.Errorf("port cannot be < 0: %d", port)
|
||||
}
|
||||
|
||||
parts := strings.SplitN(r.Name, ".", 3)
|
||||
if len(parts) < 3 {
|
||||
return SRV{}, fmt.Errorf("name %v does not contain enough fields; expected format: '_service._proto.name'", r.Name)
|
||||
}
|
||||
|
||||
return SRV{
|
||||
Service: strings.TrimPrefix(parts[0], "_"),
|
||||
Proto: strings.TrimPrefix(parts[1], "_"),
|
||||
Name: parts[2],
|
||||
Priority: r.Priority,
|
||||
Weight: r.Weight,
|
||||
Port: uint(port),
|
||||
Target: fields[1],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SRV contains all the parsed data of an SRV record.
|
||||
//
|
||||
// EXPERIMENTAL; subject to change or removal.
|
||||
type SRV struct {
|
||||
Service string // no leading "_"
|
||||
Proto string // no leading "_"
|
||||
Name string
|
||||
Priority uint
|
||||
Weight uint
|
||||
Port uint
|
||||
Target string
|
||||
}
|
||||
|
||||
// ToRecord converts the parsed SRV data to a Record struct.
|
||||
//
|
||||
// EXPERIMENTAL; subject to change or removal.
|
||||
func (s SRV) ToRecord() Record {
|
||||
return Record{
|
||||
Type: "SRV",
|
||||
Name: fmt.Sprintf("_%s._%s.%s", s.Service, s.Proto, s.Name),
|
||||
Priority: s.Priority,
|
||||
Weight: s.Weight,
|
||||
Value: fmt.Sprintf("%d %s", s.Port, s.Target),
|
||||
}
|
||||
}
|
||||
|
||||
// RelativeName makes fqdn relative to zone. For example, for a FQDN of
|
||||
@ -109,7 +199,13 @@ type Record struct {
|
||||
//
|
||||
// If fqdn cannot be expressed relative to zone, the input fqdn is returned.
|
||||
func RelativeName(fqdn, zone string) string {
|
||||
return strings.TrimSuffix(strings.TrimSuffix(fqdn, zone), ".")
|
||||
// liberally ignore trailing dots on both fqdn and zone, because
|
||||
// the relative name won't have a trailing dot anyway; I assume
|
||||
// this won't be problematic...?
|
||||
// (initially implemented because Cloudflare returns "fully-
|
||||
// qualified" domains in their records without a trailing dot,
|
||||
// but the input zone typically has a trailing dot)
|
||||
return strings.TrimSuffix(strings.TrimSuffix(strings.TrimSuffix(fqdn, "."), strings.TrimSuffix(zone, ".")), ".")
|
||||
}
|
||||
|
||||
// AbsoluteName makes name into a fully-qualified domain name (FQDN) by
|
||||
|
||||
110
vendor/github.com/minio/minio-go/v7/CREDITS
generated
vendored
110
vendor/github.com/minio/minio-go/v7/CREDITS
generated
vendored
@ -1365,60 +1365,6 @@ THE SOFTWARE.
|
||||
|
||||
================================================================
|
||||
|
||||
github.com/sirupsen/logrus
|
||||
https://github.com/sirupsen/logrus
|
||||
----------------------------------------------------------------
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Simon Eskildsen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
================================================================
|
||||
|
||||
github.com/stretchr/testify
|
||||
https://github.com/stretchr/testify
|
||||
----------------------------------------------------------------
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
================================================================
|
||||
|
||||
golang.org/x/crypto
|
||||
https://golang.org/x/crypto
|
||||
----------------------------------------------------------------
|
||||
@ -1748,59 +1694,3 @@ third-party archives.
|
||||
|
||||
================================================================
|
||||
|
||||
gopkg.in/yaml.v3
|
||||
https://gopkg.in/yaml.v3
|
||||
----------------------------------------------------------------
|
||||
|
||||
This project is covered by two different licenses: MIT and Apache.
|
||||
|
||||
#### MIT License ####
|
||||
|
||||
The following files were ported to Go from C files of libyaml, and thus
|
||||
are still covered by their original MIT license, with the additional
|
||||
copyright staring in 2011 when the project was ported over:
|
||||
|
||||
apic.go emitterc.go parserc.go readerc.go scannerc.go
|
||||
writerc.go yamlh.go yamlprivateh.go
|
||||
|
||||
Copyright (c) 2006-2010 Kirill Simonov
|
||||
Copyright (c) 2006-2011 Kirill Simonov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
### Apache License ###
|
||||
|
||||
All the remaining project files are covered by the Apache license:
|
||||
|
||||
Copyright (c) 2011-2019 Canonical Ltd
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
================================================================
|
||||
|
||||
|
||||
2
vendor/github.com/minio/minio-go/v7/api-compose-object.go
generated
vendored
2
vendor/github.com/minio/minio-go/v7/api-compose-object.go
generated
vendored
@ -119,7 +119,7 @@ func (opts CopyDestOptions) Marshal(header http.Header) {
|
||||
if opts.ReplaceMetadata {
|
||||
header.Set("x-amz-metadata-directive", replaceDirective)
|
||||
for k, v := range filterCustomMeta(opts.UserMetadata) {
|
||||
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
|
||||
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) {
|
||||
header.Set(k, v)
|
||||
} else {
|
||||
header.Set("x-amz-meta-"+k, v)
|
||||
|
||||
4
vendor/github.com/minio/minio-go/v7/api-put-object.go
generated
vendored
4
vendor/github.com/minio/minio-go/v7/api-put-object.go
generated
vendored
@ -212,7 +212,7 @@ func (opts PutObjectOptions) Header() (header http.Header) {
|
||||
}
|
||||
|
||||
for k, v := range opts.UserMetadata {
|
||||
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
|
||||
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) {
|
||||
header.Set(k, v)
|
||||
} else {
|
||||
header.Set("x-amz-meta-"+k, v)
|
||||
@ -230,7 +230,7 @@ func (opts PutObjectOptions) Header() (header http.Header) {
|
||||
// validate() checks if the UserMetadata map has standard headers or and raises an error if so.
|
||||
func (opts PutObjectOptions) validate() (err error) {
|
||||
for k, v := range opts.UserMetadata {
|
||||
if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) {
|
||||
if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) {
|
||||
return errInvalidArgument(k + " unsupported user defined metadata name")
|
||||
}
|
||||
if !httpguts.ValidHeaderFieldValue(v) {
|
||||
|
||||
23
vendor/github.com/minio/minio-go/v7/api.go
generated
vendored
23
vendor/github.com/minio/minio-go/v7/api.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2023 MinIO, Inc.
|
||||
* Copyright 2015-2024 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -80,6 +80,8 @@ type Client struct {
|
||||
|
||||
// S3 specific accelerated endpoint.
|
||||
s3AccelerateEndpoint string
|
||||
// S3 dual-stack endpoints are enabled by default.
|
||||
s3DualstackEnabled bool
|
||||
|
||||
// Region endpoint
|
||||
region string
|
||||
@ -127,7 +129,7 @@ type Options struct {
|
||||
// Global constants.
|
||||
const (
|
||||
libraryName = "minio-go"
|
||||
libraryVersion = "v7.0.67"
|
||||
libraryVersion = "v7.0.69"
|
||||
)
|
||||
|
||||
// User Agent should always following the below style.
|
||||
@ -158,9 +160,12 @@ func New(endpoint string, opts *Options) (*Client, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If Amazon S3 set to signature v4.
|
||||
if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
|
||||
// If Amazon S3 set to signature v4.
|
||||
clnt.overrideSignerType = credentials.SignatureV4
|
||||
// Amazon S3 endpoints are resolved into dual-stack endpoints by default
|
||||
// for backwards compatibility.
|
||||
clnt.s3DualstackEnabled = true
|
||||
}
|
||||
|
||||
return clnt, nil
|
||||
@ -330,6 +335,16 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
|
||||
}
|
||||
}
|
||||
|
||||
// SetS3EnableDualstack turns s3 dual-stack endpoints on or off for all requests.
|
||||
// The feature is only specific to S3 and is on by default. To read more about
|
||||
// Amazon S3 dual-stack endpoints visit -
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html
|
||||
func (c *Client) SetS3EnableDualstack(enabled bool) {
|
||||
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
|
||||
c.s3DualstackEnabled = enabled
|
||||
}
|
||||
}
|
||||
|
||||
// Hash materials provides relevant initialized hash algo writers
|
||||
// based on the expected signature type.
|
||||
//
|
||||
@ -926,7 +941,7 @@ func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, is
|
||||
// Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint
|
||||
if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) {
|
||||
// Fetch new host based on the bucket location.
|
||||
host = getS3Endpoint(bucketLocation)
|
||||
host = getS3Endpoint(bucketLocation, c.s3DualstackEnabled)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
349
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
349
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
File diff suppressed because it is too large
Load Diff
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
generated
vendored
@ -237,6 +237,7 @@ func (m *STSAssumeRole) Retrieve() (Value, error) {
|
||||
AccessKeyID: a.Result.Credentials.AccessKey,
|
||||
SecretAccessKey: a.Result.Credentials.SecretKey,
|
||||
SessionToken: a.Result.Credentials.SessionToken,
|
||||
Expiration: a.Result.Credentials.Expiration,
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
11
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
generated
vendored
11
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
generated
vendored
@ -30,17 +30,20 @@ const (
|
||||
defaultExpiryWindow = 0.8
|
||||
)
|
||||
|
||||
// A Value is the AWS credentials value for individual credential fields.
|
||||
// A Value is the S3 credentials value for individual credential fields.
|
||||
type Value struct {
|
||||
// AWS Access key ID
|
||||
// S3 Access key ID
|
||||
AccessKeyID string
|
||||
|
||||
// AWS Secret Access Key
|
||||
// S3 Secret Access Key
|
||||
SecretAccessKey string
|
||||
|
||||
// AWS Session Token
|
||||
// S3 Session Token
|
||||
SessionToken string
|
||||
|
||||
// Expiration of this credentials - null means no expiration associated
|
||||
Expiration time.Time
|
||||
|
||||
// Signature Type.
|
||||
SignerType SignatureType
|
||||
}
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
generated
vendored
@ -129,6 +129,7 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) {
|
||||
AccessKeyID: externalProcessCredentials.AccessKeyID,
|
||||
SecretAccessKey: externalProcessCredentials.SecretAccessKey,
|
||||
SessionToken: externalProcessCredentials.SessionToken,
|
||||
Expiration: externalProcessCredentials.Expiration,
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
23
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
generated
vendored
23
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
generated
vendored
@ -61,6 +61,7 @@ type IAM struct {
|
||||
// Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
|
||||
Container struct {
|
||||
AuthorizationToken string
|
||||
AuthorizationTokenFile string
|
||||
CredentialsFullURI string
|
||||
CredentialsRelativeURI string
|
||||
}
|
||||
@ -105,6 +106,11 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
token = m.Container.AuthorizationToken
|
||||
}
|
||||
|
||||
tokenFile := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE")
|
||||
if tokenFile == "" {
|
||||
tokenFile = m.Container.AuthorizationToken
|
||||
}
|
||||
|
||||
relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
|
||||
if relativeURI == "" {
|
||||
relativeURI = m.Container.CredentialsRelativeURI
|
||||
@ -181,6 +187,10 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
|
||||
roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
|
||||
|
||||
case tokenFile != "" && fullURI != "":
|
||||
endpoint = fullURI
|
||||
roleCreds, err = getEKSPodIdentityCredentials(m.Client, endpoint, tokenFile)
|
||||
|
||||
case fullURI != "":
|
||||
if len(endpoint) == 0 {
|
||||
endpoint = fullURI
|
||||
@ -209,6 +219,7 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
AccessKeyID: roleCreds.AccessKeyID,
|
||||
SecretAccessKey: roleCreds.SecretAccessKey,
|
||||
SessionToken: roleCreds.Token,
|
||||
Expiration: roleCreds.Expiration,
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
@ -304,6 +315,18 @@ func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2Role
|
||||
return respCreds, nil
|
||||
}
|
||||
|
||||
func getEKSPodIdentityCredentials(client *http.Client, endpoint string, tokenFile string) (ec2RoleCredRespBody, error) {
|
||||
if tokenFile != "" {
|
||||
bytes, err := os.ReadFile(tokenFile)
|
||||
if err != nil {
|
||||
return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: failed to read token file:%s", err)
|
||||
}
|
||||
token := string(bytes)
|
||||
return getEcsTaskCredentials(client, endpoint, token)
|
||||
}
|
||||
return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: no tokenFile found")
|
||||
}
|
||||
|
||||
func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
generated
vendored
@ -177,6 +177,7 @@ func (m *STSClientGrants) Retrieve() (Value, error) {
|
||||
AccessKeyID: a.Result.Credentials.AccessKey,
|
||||
SecretAccessKey: a.Result.Credentials.SecretKey,
|
||||
SessionToken: a.Result.Credentials.SessionToken,
|
||||
Expiration: a.Result.Credentials.Expiration,
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go
generated
vendored
@ -113,6 +113,7 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
|
||||
AccessKeyID: cr.AccessKey,
|
||||
SecretAccessKey: cr.SecretKey,
|
||||
SessionToken: cr.SessionToken,
|
||||
Expiration: cr.Expiration,
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
generated
vendored
@ -184,6 +184,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
AccessKeyID: cr.AccessKey,
|
||||
SecretAccessKey: cr.SecretKey,
|
||||
SessionToken: cr.SessionToken,
|
||||
Expiration: cr.Expiration,
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
generated
vendored
@ -188,6 +188,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
AccessKeyID: response.Result.Credentials.AccessKey,
|
||||
SecretAccessKey: response.Result.Credentials.SecretKey,
|
||||
SessionToken: response.Result.Credentials.SessionToken,
|
||||
Expiration: response.Result.Credentials.Expiration,
|
||||
SignerType: SignatureDefault,
|
||||
}, nil
|
||||
}
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
generated
vendored
@ -195,6 +195,7 @@ func (m *STSWebIdentity) Retrieve() (Value, error) {
|
||||
AccessKeyID: a.Result.Credentials.AccessKey,
|
||||
SecretAccessKey: a.Result.Credentials.SecretKey,
|
||||
SessionToken: a.Result.Credentials.SessionToken,
|
||||
Expiration: a.Result.Credentials.Expiration,
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/retry.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/retry.go
generated
vendored
@ -118,6 +118,7 @@ var retryableHTTPStatusCodes = map[int]struct{}{
|
||||
http.StatusBadGateway: {},
|
||||
http.StatusServiceUnavailable: {},
|
||||
http.StatusGatewayTimeout: {},
|
||||
520: {}, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected.
|
||||
// Add more HTTP status codes here.
|
||||
}
|
||||
|
||||
|
||||
183
vendor/github.com/minio/minio-go/v7/s3-endpoints.go
generated
vendored
183
vendor/github.com/minio/minio-go/v7/s3-endpoints.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
* Copyright 2015-2024 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -17,48 +17,155 @@
|
||||
|
||||
package minio
|
||||
|
||||
type awsS3Endpoint struct {
|
||||
endpoint string
|
||||
dualstackEndpoint string
|
||||
}
|
||||
|
||||
// awsS3EndpointMap Amazon S3 endpoint map.
|
||||
var awsS3EndpointMap = map[string]string{
|
||||
"us-east-1": "s3.dualstack.us-east-1.amazonaws.com",
|
||||
"us-east-2": "s3.dualstack.us-east-2.amazonaws.com",
|
||||
"us-west-2": "s3.dualstack.us-west-2.amazonaws.com",
|
||||
"us-west-1": "s3.dualstack.us-west-1.amazonaws.com",
|
||||
"ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com",
|
||||
"eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com",
|
||||
"eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com",
|
||||
"eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com",
|
||||
"eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com",
|
||||
"eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com",
|
||||
"eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com",
|
||||
"eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com",
|
||||
"eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com",
|
||||
"ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com",
|
||||
"ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com",
|
||||
"ap-south-2": "s3.dualstack.ap-south-2.amazonaws.com",
|
||||
"ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com",
|
||||
"ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com",
|
||||
"ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com",
|
||||
"ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com",
|
||||
"ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com",
|
||||
"af-south-1": "s3.dualstack.af-south-1.amazonaws.com",
|
||||
"me-central-1": "s3.dualstack.me-central-1.amazonaws.com",
|
||||
"me-south-1": "s3.dualstack.me-south-1.amazonaws.com",
|
||||
"sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com",
|
||||
"us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com",
|
||||
"us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com",
|
||||
"cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn",
|
||||
"cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
|
||||
"ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com",
|
||||
"ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com",
|
||||
"il-central-1": "s3.dualstack.il-central-1.amazonaws.com",
|
||||
var awsS3EndpointMap = map[string]awsS3Endpoint{
|
||||
"us-east-1": {
|
||||
"s3.us-east-1.amazonaws.com",
|
||||
"s3.dualstack.us-east-1.amazonaws.com",
|
||||
},
|
||||
"us-east-2": {
|
||||
"s3.us-east-2.amazonaws.com",
|
||||
"s3.dualstack.us-east-2.amazonaws.com",
|
||||
},
|
||||
"us-west-2": {
|
||||
"s3.us-west-2.amazonaws.com",
|
||||
"s3.dualstack.us-west-2.amazonaws.com",
|
||||
},
|
||||
"us-west-1": {
|
||||
"s3.us-west-1.amazonaws.com",
|
||||
"s3.dualstack.us-west-1.amazonaws.com",
|
||||
},
|
||||
"ca-central-1": {
|
||||
"s3.ca-central-1.amazonaws.com",
|
||||
"s3.dualstack.ca-central-1.amazonaws.com",
|
||||
},
|
||||
"eu-west-1": {
|
||||
"s3.eu-west-1.amazonaws.com",
|
||||
"s3.dualstack.eu-west-1.amazonaws.com",
|
||||
},
|
||||
"eu-west-2": {
|
||||
"s3.eu-west-2.amazonaws.com",
|
||||
"s3.dualstack.eu-west-2.amazonaws.com",
|
||||
},
|
||||
"eu-west-3": {
|
||||
"s3.eu-west-3.amazonaws.com",
|
||||
"s3.dualstack.eu-west-3.amazonaws.com",
|
||||
},
|
||||
"eu-central-1": {
|
||||
"s3.eu-central-1.amazonaws.com",
|
||||
"s3.dualstack.eu-central-1.amazonaws.com",
|
||||
},
|
||||
"eu-central-2": {
|
||||
"s3.eu-central-2.amazonaws.com",
|
||||
"s3.dualstack.eu-central-2.amazonaws.com",
|
||||
},
|
||||
"eu-north-1": {
|
||||
"s3.eu-north-1.amazonaws.com",
|
||||
"s3.dualstack.eu-north-1.amazonaws.com",
|
||||
},
|
||||
"eu-south-1": {
|
||||
"s3.eu-south-1.amazonaws.com",
|
||||
"s3.dualstack.eu-south-1.amazonaws.com",
|
||||
},
|
||||
"eu-south-2": {
|
||||
"s3.eu-south-2.amazonaws.com",
|
||||
"s3.dualstack.eu-south-2.amazonaws.com",
|
||||
},
|
||||
"ap-east-1": {
|
||||
"s3.ap-east-1.amazonaws.com",
|
||||
"s3.dualstack.ap-east-1.amazonaws.com",
|
||||
},
|
||||
"ap-south-1": {
|
||||
"s3.ap-south-1.amazonaws.com",
|
||||
"s3.dualstack.ap-south-1.amazonaws.com",
|
||||
},
|
||||
"ap-south-2": {
|
||||
"s3.ap-south-2.amazonaws.com",
|
||||
"s3.dualstack.ap-south-2.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-1": {
|
||||
"s3.ap-southeast-1.amazonaws.com",
|
||||
"s3.dualstack.ap-southeast-1.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-2": {
|
||||
"s3.ap-southeast-2.amazonaws.com",
|
||||
"s3.dualstack.ap-southeast-2.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-3": {
|
||||
"s3.ap-southeast-3.amazonaws.com",
|
||||
"s3.dualstack.ap-southeast-3.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-4": {
|
||||
"s3.ap-southeast-4.amazonaws.com",
|
||||
"s3.dualstack.ap-southeast-4.amazonaws.com",
|
||||
},
|
||||
"ap-northeast-1": {
|
||||
"s3.ap-northeast-1.amazonaws.com",
|
||||
"s3.dualstack.ap-northeast-1.amazonaws.com",
|
||||
},
|
||||
"ap-northeast-2": {
|
||||
"s3.ap-northeast-2.amazonaws.com",
|
||||
"s3.dualstack.ap-northeast-2.amazonaws.com",
|
||||
},
|
||||
"ap-northeast-3": {
|
||||
"s3.ap-northeast-3.amazonaws.com",
|
||||
"s3.dualstack.ap-northeast-3.amazonaws.com",
|
||||
},
|
||||
"af-south-1": {
|
||||
"s3.af-south-1.amazonaws.com",
|
||||
"s3.dualstack.af-south-1.amazonaws.com",
|
||||
},
|
||||
"me-central-1": {
|
||||
"s3.me-central-1.amazonaws.com",
|
||||
"s3.dualstack.me-central-1.amazonaws.com",
|
||||
},
|
||||
"me-south-1": {
|
||||
"s3.me-south-1.amazonaws.com",
|
||||
"s3.dualstack.me-south-1.amazonaws.com",
|
||||
},
|
||||
"sa-east-1": {
|
||||
"s3.sa-east-1.amazonaws.com",
|
||||
"s3.dualstack.sa-east-1.amazonaws.com",
|
||||
},
|
||||
"us-gov-west-1": {
|
||||
"s3.us-gov-west-1.amazonaws.com",
|
||||
"s3.dualstack.us-gov-west-1.amazonaws.com",
|
||||
},
|
||||
"us-gov-east-1": {
|
||||
"s3.us-gov-east-1.amazonaws.com",
|
||||
"s3.dualstack.us-gov-east-1.amazonaws.com",
|
||||
},
|
||||
"cn-north-1": {
|
||||
"s3.cn-north-1.amazonaws.com.cn",
|
||||
"s3.dualstack.cn-north-1.amazonaws.com.cn",
|
||||
},
|
||||
"cn-northwest-1": {
|
||||
"s3.cn-northwest-1.amazonaws.com.cn",
|
||||
"s3.dualstack.cn-northwest-1.amazonaws.com.cn",
|
||||
},
|
||||
"il-central-1": {
|
||||
"s3.il-central-1.amazonaws.com",
|
||||
"s3.dualstack.il-central-1.amazonaws.com",
|
||||
},
|
||||
}
|
||||
|
||||
// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
|
||||
func getS3Endpoint(bucketLocation string) (s3Endpoint string) {
|
||||
func getS3Endpoint(bucketLocation string, useDualstack bool) (endpoint string) {
|
||||
s3Endpoint, ok := awsS3EndpointMap[bucketLocation]
|
||||
if !ok {
|
||||
// Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint.
|
||||
s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com"
|
||||
// Default to 's3.us-east-1.amazonaws.com' endpoint.
|
||||
if useDualstack {
|
||||
return "s3.dualstack.us-east-1.amazonaws.com"
|
||||
}
|
||||
return s3Endpoint
|
||||
return "s3.us-east-1.amazonaws.com"
|
||||
}
|
||||
if useDualstack {
|
||||
return s3Endpoint.dualstackEndpoint
|
||||
}
|
||||
return s3Endpoint.endpoint
|
||||
}
|
||||
|
||||
15
vendor/github.com/minio/minio-go/v7/utils.go
generated
vendored
15
vendor/github.com/minio/minio-go/v7/utils.go
generated
vendored
@ -512,6 +512,21 @@ func isAmzHeader(headerKey string) bool {
|
||||
return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
|
||||
}
|
||||
|
||||
var supportedReplicationEncryptionHeaders = map[string]bool{
|
||||
"x-minio-replication-server-side-encryption-sealed-key": true,
|
||||
"x-minio-replication-server-side-encryption-seal-algorithm": true,
|
||||
"x-minio-replication-server-side-encryption-iv": true,
|
||||
"x-minio-replication-encrypted-multipart": true,
|
||||
"x-minio-replication-actual-object-size": true,
|
||||
// Add more supported headers here.
|
||||
// Must be lower case.
|
||||
}
|
||||
|
||||
// isValidReplicationEncryptionHeader returns true if header is one of valid replication encryption headers
|
||||
func isValidReplicationEncryptionHeader(headerKey string) bool {
|
||||
return supportedReplicationEncryptionHeaders[strings.ToLower(headerKey)]
|
||||
}
|
||||
|
||||
// supportedQueryValues is a list of query strings that can be passed in when using GetObject.
|
||||
var supportedQueryValues = map[string]bool{
|
||||
"attributes": true,
|
||||
|
||||
6
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
6
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
@ -75,14 +75,14 @@ func ResponseFormat(h http.Header) Format {
|
||||
func NewDecoder(r io.Reader, format Format) Decoder {
|
||||
switch format.FormatType() {
|
||||
case TypeProtoDelim:
|
||||
return &protoDecoder{r: r}
|
||||
return &protoDecoder{r: bufio.NewReader(r)}
|
||||
}
|
||||
return &textDecoder{r: r}
|
||||
}
|
||||
|
||||
// protoDecoder implements the Decoder interface for protocol buffers.
|
||||
type protoDecoder struct {
|
||||
r io.Reader
|
||||
r protodelim.Reader
|
||||
}
|
||||
|
||||
// Decode implements the Decoder interface.
|
||||
@ -90,7 +90,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
|
||||
opts := protodelim.UnmarshalOptions{
|
||||
MaxSize: -1,
|
||||
}
|
||||
if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil {
|
||||
if err := opts.UnmarshalFrom(d.r, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
|
||||
|
||||
10
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
10
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
@ -139,7 +139,13 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
|
||||
// interface is kept for backwards compatibility.
|
||||
// In cases where the Format does not allow for UTF-8 names, the global
|
||||
// NameEscapingScheme will be applied.
|
||||
func NewEncoder(w io.Writer, format Format) Encoder {
|
||||
//
|
||||
// NewEncoder can be called with additional options to customize the OpenMetrics text output.
|
||||
// For example:
|
||||
// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
|
||||
//
|
||||
// Extra options are ignored for all other formats.
|
||||
func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
|
||||
escapingScheme := format.ToEscapingScheme()
|
||||
|
||||
switch format.FormatType() {
|
||||
@ -178,7 +184,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
|
||||
case TypeOpenMetrics:
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme))
|
||||
_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
|
||||
return err
|
||||
},
|
||||
close: func() error {
|
||||
|
||||
22
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
22
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
@ -15,6 +15,7 @@
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
@ -63,7 +64,7 @@ const (
|
||||
type FormatType int
|
||||
|
||||
const (
|
||||
TypeUnknown = iota
|
||||
TypeUnknown FormatType = iota
|
||||
TypeProtoCompact
|
||||
TypeProtoDelim
|
||||
TypeProtoText
|
||||
@ -73,7 +74,8 @@ const (
|
||||
|
||||
// NewFormat generates a new Format from the type provided. Mostly used for
|
||||
// tests, most Formats should be generated as part of content negotiation in
|
||||
// encode.go.
|
||||
// encode.go. If a type has more than one version, the latest version will be
|
||||
// returned.
|
||||
func NewFormat(t FormatType) Format {
|
||||
switch t {
|
||||
case TypeProtoCompact:
|
||||
@ -91,13 +93,21 @@ func NewFormat(t FormatType) Format {
|
||||
}
|
||||
}
|
||||
|
||||
// NewOpenMetricsFormat generates a new OpenMetrics format matching the
|
||||
// specified version number.
|
||||
func NewOpenMetricsFormat(version string) (Format, error) {
|
||||
if version == OpenMetricsVersion_0_0_1 {
|
||||
return fmtOpenMetrics_0_0_1, nil
|
||||
}
|
||||
if version == OpenMetricsVersion_1_0_0 {
|
||||
return fmtOpenMetrics_1_0_0, nil
|
||||
}
|
||||
return fmtUnknown, fmt.Errorf("unknown open metrics version string")
|
||||
}
|
||||
|
||||
// FormatType deduces an overall FormatType for the given format.
|
||||
func (f Format) FormatType() FormatType {
|
||||
toks := strings.Split(string(f), ";")
|
||||
if len(toks) < 2 {
|
||||
return TypeUnknown
|
||||
}
|
||||
|
||||
params := make(map[string]string)
|
||||
for i, t := range toks {
|
||||
if i == 0 {
|
||||
|
||||
198
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
generated
vendored
198
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
generated
vendored
@ -22,11 +22,47 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
type encoderOption struct {
|
||||
withCreatedLines bool
|
||||
withUnit bool
|
||||
}
|
||||
|
||||
type EncoderOption func(*encoderOption)
|
||||
|
||||
// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
|
||||
// to include _created lines (See
|
||||
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
|
||||
// Created timestamps can improve the accuracy of series reset detection, but
|
||||
// come with a bandwidth cost.
|
||||
//
|
||||
// At the time of writing, created timestamp ingestion is still experimental in
|
||||
// Prometheus and need to be enabled with the feature-flag
|
||||
// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are
|
||||
// still possible. Therefore, it is recommended to use this feature with caution.
|
||||
func WithCreatedLines() EncoderOption {
|
||||
return func(t *encoderOption) {
|
||||
t.withCreatedLines = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithUnit is an EncoderOption enabling a set unit to be written to the output
|
||||
// and to be added to the metric name, if it's not there already, as a suffix.
|
||||
// Without opting in this way, the unit will not be added to the metric name and,
|
||||
// on top of that, the unit will not be passed onto the output, even if it
|
||||
// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit !=nil.
|
||||
func WithUnit() EncoderOption {
|
||||
return func(t *encoderOption) {
|
||||
t.withUnit = true
|
||||
}
|
||||
}
|
||||
|
||||
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
|
||||
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
|
||||
// the number of bytes written and any error encountered. The output will have
|
||||
@ -59,20 +95,34 @@ import (
|
||||
// Prometheus to OpenMetrics or vice versa:
|
||||
//
|
||||
// - Counters are expected to have the `_total` suffix in their metric name. In
|
||||
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
|
||||
// line. A counter with a missing `_total` suffix is not an error. However,
|
||||
// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
|
||||
// lines. A counter with a missing `_total` suffix is not an error. However,
|
||||
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
|
||||
// output.
|
||||
//
|
||||
// - No support for the following (optional) features: `# UNIT` line, `_created`
|
||||
// line, info type, stateset type, gaugehistogram type.
|
||||
// - According to the OM specs, the `# UNIT` line is optional, but if populated,
|
||||
// the unit has to be present in the metric name as its suffix:
|
||||
// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
|
||||
// However, in order to accommodate any potential scenario where such a change in the
|
||||
// metric name is not desirable, the users are here given the choice of either explicitly
|
||||
// opt in, in case they wish for the unit to be included in the output AND in the metric name
|
||||
// as a suffix (see the description of the WithUnit function above),
|
||||
// or not to opt in, in case they don't want for any of that to happen.
|
||||
//
|
||||
// - No support for the following (optional) features: info type,
|
||||
// stateset type, gaugehistogram type.
|
||||
//
|
||||
// - The size of exemplar labels is not checked (i.e. it's possible to create
|
||||
// exemplars that are larger than allowed by the OpenMetrics specification).
|
||||
//
|
||||
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
|
||||
// with a `NaN` value.)
|
||||
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
||||
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
|
||||
toOM := encoderOption{}
|
||||
for _, option := range options {
|
||||
option(&toOM)
|
||||
}
|
||||
|
||||
name := in.GetName()
|
||||
if name == "" {
|
||||
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||
@ -97,10 +147,13 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
||||
var (
|
||||
n int
|
||||
metricType = in.GetType()
|
||||
shortName = name
|
||||
compliantName = name
|
||||
)
|
||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
|
||||
shortName = name[:len(name)-6]
|
||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
|
||||
compliantName = name[:len(name)-6]
|
||||
}
|
||||
if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
|
||||
compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
|
||||
}
|
||||
|
||||
// Comments, first HELP, then TYPE.
|
||||
@ -110,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeName(w, shortName)
|
||||
n, err = writeName(w, compliantName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
@ -136,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeName(w, shortName)
|
||||
n, err = writeName(w, compliantName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
@ -163,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if toOM.withUnit && in.Unit != nil {
|
||||
n, err = w.WriteString("# UNIT ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeName(w, compliantName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeEscapedString(w, *in.Unit, true)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var createdTsBytesWritten int
|
||||
|
||||
// Finally the samples, one line for each.
|
||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
|
||||
compliantName = compliantName + "_total"
|
||||
}
|
||||
for _, metric := range in.Metric {
|
||||
switch metricType {
|
||||
case dto.MetricType_COUNTER:
|
||||
if metric.Counter == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected counter in metric %s %s", name, metric,
|
||||
"expected counter in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
// Note that we have ensured above that either the name
|
||||
// ends on `_total` or that the rendered type is
|
||||
// `unknown`. Therefore, no `_total` must be added here.
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
w, compliantName, "", metric, "", 0,
|
||||
metric.Counter.GetValue(), 0, false,
|
||||
metric.Counter.Exemplar,
|
||||
)
|
||||
if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
|
||||
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
|
||||
n += createdTsBytesWritten
|
||||
}
|
||||
case dto.MetricType_GAUGE:
|
||||
if metric.Gauge == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected gauge in metric %s %s", name, metric,
|
||||
"expected gauge in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
w, compliantName, "", metric, "", 0,
|
||||
metric.Gauge.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_UNTYPED:
|
||||
if metric.Untyped == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected untyped in metric %s %s", name, metric,
|
||||
"expected untyped in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
w, compliantName, "", metric, "", 0,
|
||||
metric.Untyped.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_SUMMARY:
|
||||
if metric.Summary == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected summary in metric %s %s", name, metric,
|
||||
"expected summary in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
for _, q := range metric.Summary.Quantile {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric,
|
||||
w, compliantName, "", metric,
|
||||
model.QuantileLabel, q.GetQuantile(),
|
||||
q.GetValue(), 0, false,
|
||||
nil,
|
||||
@ -222,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
||||
}
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_sum", metric, "", 0,
|
||||
w, compliantName, "_sum", metric, "", 0,
|
||||
metric.Summary.GetSampleSum(), 0, false,
|
||||
nil,
|
||||
)
|
||||
@ -231,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
||||
return
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_count", metric, "", 0,
|
||||
w, compliantName, "_count", metric, "", 0,
|
||||
0, metric.Summary.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
|
||||
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
|
||||
n += createdTsBytesWritten
|
||||
}
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
if metric.Histogram == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected histogram in metric %s %s", name, metric,
|
||||
"expected histogram in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
infSeen := false
|
||||
for _, b := range metric.Histogram.Bucket {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_bucket", metric,
|
||||
w, compliantName, "_bucket", metric,
|
||||
model.BucketLabel, b.GetUpperBound(),
|
||||
0, b.GetCumulativeCount(), true,
|
||||
b.Exemplar,
|
||||
@ -259,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
||||
}
|
||||
if !infSeen {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_bucket", metric,
|
||||
w, compliantName, "_bucket", metric,
|
||||
model.BucketLabel, math.Inf(+1),
|
||||
0, metric.Histogram.GetSampleCount(), true,
|
||||
nil,
|
||||
@ -270,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
||||
}
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_sum", metric, "", 0,
|
||||
w, compliantName, "_sum", metric, "", 0,
|
||||
metric.Histogram.GetSampleSum(), 0, false,
|
||||
nil,
|
||||
)
|
||||
@ -279,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
||||
return
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_count", metric, "", 0,
|
||||
w, compliantName, "_count", metric, "", 0,
|
||||
0, metric.Histogram.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
|
||||
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
|
||||
n += createdTsBytesWritten
|
||||
}
|
||||
default:
|
||||
return written, fmt.Errorf(
|
||||
"unexpected type in metric %s %s", name, metric,
|
||||
"unexpected type in metric %s %s", compliantName, metric,
|
||||
)
|
||||
}
|
||||
written += n
|
||||
@ -350,7 +445,7 @@ func writeOpenMetricsSample(
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
if exemplar != nil {
|
||||
if exemplar != nil && len(exemplar.Label) > 0 {
|
||||
n, err = writeExemplar(w, exemplar)
|
||||
written += n
|
||||
if err != nil {
|
||||
@ -473,6 +568,49 @@ func writeOpenMetricsNameAndLabelPairs(
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// writeOpenMetricsCreated writes the created timestamp for a single time series
|
||||
// following OpenMetrics text format to w, given the metric name, the metric proto
|
||||
// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
|
||||
// an additional label name with a float64 value (use empty string as label name if
|
||||
// not required) and the timestamp that represents the created timestamp.
|
||||
// The function returns the number of bytes written and any error encountered.
|
||||
func writeOpenMetricsCreated(w enhancedWriter,
|
||||
name, suffixToTrim string, metric *dto.Metric,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
createdTimestamp *timestamppb.Timestamp,
|
||||
) (int, error) {
|
||||
written := 0
|
||||
n, err := writeOpenMetricsNameAndLabelPairs(
|
||||
w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
|
||||
// TODO(beorn7): Format this directly from components of ts to
|
||||
// avoid overflow/underflow and precision issues of the float
|
||||
// conversion.
|
||||
n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
|
||||
// function returns the number of bytes written and any error encountered.
|
||||
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
|
||||
|
||||
11
vendor/github.com/prometheus/common/model/labelset.go
generated
vendored
11
vendor/github.com/prometheus/common/model/labelset.go
generated
vendored
@ -17,7 +17,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
|
||||
@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet {
|
||||
return result
|
||||
}
|
||||
|
||||
func (l LabelSet) String() string {
|
||||
lstrs := make([]string, 0, len(l))
|
||||
for l, v := range l {
|
||||
lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
|
||||
}
|
||||
|
||||
sort.Strings(lstrs)
|
||||
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
|
||||
}
|
||||
|
||||
// Fingerprint returns the LabelSet's fingerprint.
|
||||
func (ls LabelSet) Fingerprint() Fingerprint {
|
||||
return labelSetToFingerprint(ls)
|
||||
|
||||
45
vendor/github.com/prometheus/common/model/labelset_string.go
generated
vendored
Normal file
45
vendor/github.com/prometheus/common/model/labelset_string.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.21
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
|
||||
func (l LabelSet) String() string {
|
||||
var lna [32]string // On stack to avoid memory allocation for sorting names.
|
||||
labelNames := lna[:0]
|
||||
for name := range l {
|
||||
labelNames = append(labelNames, string(name))
|
||||
}
|
||||
sort.Strings(labelNames)
|
||||
var bytea [1024]byte // On stack to avoid memory allocation while building the output.
|
||||
b := bytes.NewBuffer(bytea[:0])
|
||||
b.WriteByte('{')
|
||||
for i, name := range labelNames {
|
||||
if i > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
b.WriteString(name)
|
||||
b.WriteByte('=')
|
||||
b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
|
||||
}
|
||||
b.WriteByte('}')
|
||||
return b.String()
|
||||
}
|
||||
39
vendor/github.com/prometheus/common/model/labelset_string_go120.go
generated
vendored
Normal file
39
vendor/github.com/prometheus/common/model/labelset_string_go120.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !go1.21
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// String was optimized using functions not available for go 1.20
|
||||
// or lower. We keep the old implementation for compatibility with client_golang.
|
||||
// Once client golang drops support for go 1.20 (scheduled for August 2024), this
|
||||
// file can be removed.
|
||||
func (l LabelSet) String() string {
|
||||
labelNames := make([]string, 0, len(l))
|
||||
for name := range l {
|
||||
labelNames = append(labelNames, string(name))
|
||||
}
|
||||
sort.Strings(labelNames)
|
||||
lstrs := make([]string, 0, len(l))
|
||||
for _, name := range labelNames {
|
||||
lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
|
||||
}
|
||||
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
|
||||
}
|
||||
1
vendor/github.com/prometheus/common/model/metric.go
generated
vendored
1
vendor/github.com/prometheus/common/model/metric.go
generated
vendored
@ -204,6 +204,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
|
||||
out := &dto.MetricFamily{
|
||||
Help: v.Help,
|
||||
Type: v.Type,
|
||||
Unit: v.Unit,
|
||||
}
|
||||
|
||||
// If the name is nil, copy as-is, don't try to escape.
|
||||
|
||||
16
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
16
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
@ -61,11 +61,11 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
||||
SKIP_GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.54.2
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
|
||||
GOLANGCI_LINT_VERSION ?= v1.55.2
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
|
||||
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
|
||||
# If we're in CI and there is an Actions file, that means the linter
|
||||
# is being run in Actions, so we don't need to run it here.
|
||||
ifneq (,$(SKIP_GOLANGCI_LINT))
|
||||
@ -169,12 +169,16 @@ common-vet:
|
||||
common-lint: $(GOLANGCI_LINT)
|
||||
ifdef GOLANGCI_LINT
|
||||
@echo ">> running golangci-lint"
|
||||
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
|
||||
# Otherwise staticcheck might fail randomly for some reason not yet explained.
|
||||
$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
|
||||
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
|
||||
endif
|
||||
|
||||
.PHONY: common-lint-fix
|
||||
common-lint-fix: $(GOLANGCI_LINT)
|
||||
ifdef GOLANGCI_LINT
|
||||
@echo ">> running golangci-lint fix"
|
||||
$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
|
||||
endif
|
||||
|
||||
.PHONY: common-yamllint
|
||||
common-yamllint:
|
||||
@echo ">> running yamllint on all YAML files in the repository"
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/crypto.go
generated
vendored
2
vendor/github.com/prometheus/procfs/crypto.go
generated
vendored
@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
|
||||
|
||||
kv := strings.Split(text, ":")
|
||||
if len(kv) != 2 {
|
||||
return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
|
||||
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
|
||||
}
|
||||
|
||||
k := strings.TrimSpace(kv[0])
|
||||
|
||||
218
vendor/github.com/prometheus/procfs/meminfo.go
generated
vendored
218
vendor/github.com/prometheus/procfs/meminfo.go
generated
vendored
@ -126,6 +126,7 @@ type Meminfo struct {
|
||||
VmallocUsed *uint64
|
||||
// largest contiguous block of vmalloc area which is free
|
||||
VmallocChunk *uint64
|
||||
Percpu *uint64
|
||||
HardwareCorrupted *uint64
|
||||
AnonHugePages *uint64
|
||||
ShmemHugePages *uint64
|
||||
@ -140,6 +141,55 @@ type Meminfo struct {
|
||||
DirectMap4k *uint64
|
||||
DirectMap2M *uint64
|
||||
DirectMap1G *uint64
|
||||
|
||||
// The struct fields below are the byte-normalized counterparts to the
|
||||
// existing struct fields. Values are normalized using the optional
|
||||
// unit field in the meminfo line.
|
||||
MemTotalBytes *uint64
|
||||
MemFreeBytes *uint64
|
||||
MemAvailableBytes *uint64
|
||||
BuffersBytes *uint64
|
||||
CachedBytes *uint64
|
||||
SwapCachedBytes *uint64
|
||||
ActiveBytes *uint64
|
||||
InactiveBytes *uint64
|
||||
ActiveAnonBytes *uint64
|
||||
InactiveAnonBytes *uint64
|
||||
ActiveFileBytes *uint64
|
||||
InactiveFileBytes *uint64
|
||||
UnevictableBytes *uint64
|
||||
MlockedBytes *uint64
|
||||
SwapTotalBytes *uint64
|
||||
SwapFreeBytes *uint64
|
||||
DirtyBytes *uint64
|
||||
WritebackBytes *uint64
|
||||
AnonPagesBytes *uint64
|
||||
MappedBytes *uint64
|
||||
ShmemBytes *uint64
|
||||
SlabBytes *uint64
|
||||
SReclaimableBytes *uint64
|
||||
SUnreclaimBytes *uint64
|
||||
KernelStackBytes *uint64
|
||||
PageTablesBytes *uint64
|
||||
NFSUnstableBytes *uint64
|
||||
BounceBytes *uint64
|
||||
WritebackTmpBytes *uint64
|
||||
CommitLimitBytes *uint64
|
||||
CommittedASBytes *uint64
|
||||
VmallocTotalBytes *uint64
|
||||
VmallocUsedBytes *uint64
|
||||
VmallocChunkBytes *uint64
|
||||
PercpuBytes *uint64
|
||||
HardwareCorruptedBytes *uint64
|
||||
AnonHugePagesBytes *uint64
|
||||
ShmemHugePagesBytes *uint64
|
||||
ShmemPmdMappedBytes *uint64
|
||||
CmaTotalBytes *uint64
|
||||
CmaFreeBytes *uint64
|
||||
HugepagesizeBytes *uint64
|
||||
DirectMap4kBytes *uint64
|
||||
DirectMap2MBytes *uint64
|
||||
DirectMap1GBytes *uint64
|
||||
}
|
||||
|
||||
// Meminfo returns an information about current kernel/system memory statistics.
|
||||
@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
|
||||
var m Meminfo
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
// Each line has at least a name and value; we ignore the unit.
|
||||
fields := strings.Fields(s.Text())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
|
||||
}
|
||||
var val, valBytes uint64
|
||||
|
||||
v, err := strconv.ParseUint(fields[1], 0, 64)
|
||||
val, err := strconv.ParseUint(fields[1], 0, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch len(fields) {
|
||||
case 2:
|
||||
// No unit present, use the parsed the value as bytes directly.
|
||||
valBytes = val
|
||||
case 3:
|
||||
// Unit present in optional 3rd field, convert it to
|
||||
// bytes. The only unit supported within the Linux
|
||||
// kernel is `kB`.
|
||||
if fields[2] != "kB" {
|
||||
return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
|
||||
}
|
||||
|
||||
valBytes = 1024 * val
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
|
||||
}
|
||||
|
||||
switch fields[0] {
|
||||
case "MemTotal:":
|
||||
m.MemTotal = &v
|
||||
m.MemTotal = &val
|
||||
m.MemTotalBytes = &valBytes
|
||||
case "MemFree:":
|
||||
m.MemFree = &v
|
||||
m.MemFree = &val
|
||||
m.MemFreeBytes = &valBytes
|
||||
case "MemAvailable:":
|
||||
m.MemAvailable = &v
|
||||
m.MemAvailable = &val
|
||||
m.MemAvailableBytes = &valBytes
|
||||
case "Buffers:":
|
||||
m.Buffers = &v
|
||||
m.Buffers = &val
|
||||
m.BuffersBytes = &valBytes
|
||||
case "Cached:":
|
||||
m.Cached = &v
|
||||
m.Cached = &val
|
||||
m.CachedBytes = &valBytes
|
||||
case "SwapCached:":
|
||||
m.SwapCached = &v
|
||||
m.SwapCached = &val
|
||||
m.SwapCachedBytes = &valBytes
|
||||
case "Active:":
|
||||
m.Active = &v
|
||||
m.Active = &val
|
||||
m.ActiveBytes = &valBytes
|
||||
case "Inactive:":
|
||||
m.Inactive = &v
|
||||
m.Inactive = &val
|
||||
m.InactiveBytes = &valBytes
|
||||
case "Active(anon):":
|
||||
m.ActiveAnon = &v
|
||||
m.ActiveAnon = &val
|
||||
m.ActiveAnonBytes = &valBytes
|
||||
case "Inactive(anon):":
|
||||
m.InactiveAnon = &v
|
||||
m.InactiveAnon = &val
|
||||
m.InactiveAnonBytes = &valBytes
|
||||
case "Active(file):":
|
||||
m.ActiveFile = &v
|
||||
m.ActiveFile = &val
|
||||
m.ActiveFileBytes = &valBytes
|
||||
case "Inactive(file):":
|
||||
m.InactiveFile = &v
|
||||
m.InactiveFile = &val
|
||||
m.InactiveFileBytes = &valBytes
|
||||
case "Unevictable:":
|
||||
m.Unevictable = &v
|
||||
m.Unevictable = &val
|
||||
m.UnevictableBytes = &valBytes
|
||||
case "Mlocked:":
|
||||
m.Mlocked = &v
|
||||
m.Mlocked = &val
|
||||
m.MlockedBytes = &valBytes
|
||||
case "SwapTotal:":
|
||||
m.SwapTotal = &v
|
||||
m.SwapTotal = &val
|
||||
m.SwapTotalBytes = &valBytes
|
||||
case "SwapFree:":
|
||||
m.SwapFree = &v
|
||||
m.SwapFree = &val
|
||||
m.SwapFreeBytes = &valBytes
|
||||
case "Dirty:":
|
||||
m.Dirty = &v
|
||||
m.Dirty = &val
|
||||
m.DirtyBytes = &valBytes
|
||||
case "Writeback:":
|
||||
m.Writeback = &v
|
||||
m.Writeback = &val
|
||||
m.WritebackBytes = &valBytes
|
||||
case "AnonPages:":
|
||||
m.AnonPages = &v
|
||||
m.AnonPages = &val
|
||||
m.AnonPagesBytes = &valBytes
|
||||
case "Mapped:":
|
||||
m.Mapped = &v
|
||||
m.Mapped = &val
|
||||
m.MappedBytes = &valBytes
|
||||
case "Shmem:":
|
||||
m.Shmem = &v
|
||||
m.Shmem = &val
|
||||
m.ShmemBytes = &valBytes
|
||||
case "Slab:":
|
||||
m.Slab = &v
|
||||
m.Slab = &val
|
||||
m.SlabBytes = &valBytes
|
||||
case "SReclaimable:":
|
||||
m.SReclaimable = &v
|
||||
m.SReclaimable = &val
|
||||
m.SReclaimableBytes = &valBytes
|
||||
case "SUnreclaim:":
|
||||
m.SUnreclaim = &v
|
||||
m.SUnreclaim = &val
|
||||
m.SUnreclaimBytes = &valBytes
|
||||
case "KernelStack:":
|
||||
m.KernelStack = &v
|
||||
m.KernelStack = &val
|
||||
m.KernelStackBytes = &valBytes
|
||||
case "PageTables:":
|
||||
m.PageTables = &v
|
||||
m.PageTables = &val
|
||||
m.PageTablesBytes = &valBytes
|
||||
case "NFS_Unstable:":
|
||||
m.NFSUnstable = &v
|
||||
m.NFSUnstable = &val
|
||||
m.NFSUnstableBytes = &valBytes
|
||||
case "Bounce:":
|
||||
m.Bounce = &v
|
||||
m.Bounce = &val
|
||||
m.BounceBytes = &valBytes
|
||||
case "WritebackTmp:":
|
||||
m.WritebackTmp = &v
|
||||
m.WritebackTmp = &val
|
||||
m.WritebackTmpBytes = &valBytes
|
||||
case "CommitLimit:":
|
||||
m.CommitLimit = &v
|
||||
m.CommitLimit = &val
|
||||
m.CommitLimitBytes = &valBytes
|
||||
case "Committed_AS:":
|
||||
m.CommittedAS = &v
|
||||
m.CommittedAS = &val
|
||||
m.CommittedASBytes = &valBytes
|
||||
case "VmallocTotal:":
|
||||
m.VmallocTotal = &v
|
||||
m.VmallocTotal = &val
|
||||
m.VmallocTotalBytes = &valBytes
|
||||
case "VmallocUsed:":
|
||||
m.VmallocUsed = &v
|
||||
m.VmallocUsed = &val
|
||||
m.VmallocUsedBytes = &valBytes
|
||||
case "VmallocChunk:":
|
||||
m.VmallocChunk = &v
|
||||
m.VmallocChunk = &val
|
||||
m.VmallocChunkBytes = &valBytes
|
||||
case "Percpu:":
|
||||
m.Percpu = &val
|
||||
m.PercpuBytes = &valBytes
|
||||
case "HardwareCorrupted:":
|
||||
m.HardwareCorrupted = &v
|
||||
m.HardwareCorrupted = &val
|
||||
m.HardwareCorruptedBytes = &valBytes
|
||||
case "AnonHugePages:":
|
||||
m.AnonHugePages = &v
|
||||
m.AnonHugePages = &val
|
||||
m.AnonHugePagesBytes = &valBytes
|
||||
case "ShmemHugePages:":
|
||||
m.ShmemHugePages = &v
|
||||
m.ShmemHugePages = &val
|
||||
m.ShmemHugePagesBytes = &valBytes
|
||||
case "ShmemPmdMapped:":
|
||||
m.ShmemPmdMapped = &v
|
||||
m.ShmemPmdMapped = &val
|
||||
m.ShmemPmdMappedBytes = &valBytes
|
||||
case "CmaTotal:":
|
||||
m.CmaTotal = &v
|
||||
m.CmaTotal = &val
|
||||
m.CmaTotalBytes = &valBytes
|
||||
case "CmaFree:":
|
||||
m.CmaFree = &v
|
||||
m.CmaFree = &val
|
||||
m.CmaFreeBytes = &valBytes
|
||||
case "HugePages_Total:":
|
||||
m.HugePagesTotal = &v
|
||||
m.HugePagesTotal = &val
|
||||
case "HugePages_Free:":
|
||||
m.HugePagesFree = &v
|
||||
m.HugePagesFree = &val
|
||||
case "HugePages_Rsvd:":
|
||||
m.HugePagesRsvd = &v
|
||||
m.HugePagesRsvd = &val
|
||||
case "HugePages_Surp:":
|
||||
m.HugePagesSurp = &v
|
||||
m.HugePagesSurp = &val
|
||||
case "Hugepagesize:":
|
||||
m.Hugepagesize = &v
|
||||
m.Hugepagesize = &val
|
||||
m.HugepagesizeBytes = &valBytes
|
||||
case "DirectMap4k:":
|
||||
m.DirectMap4k = &v
|
||||
m.DirectMap4k = &val
|
||||
m.DirectMap4kBytes = &valBytes
|
||||
case "DirectMap2M:":
|
||||
m.DirectMap2M = &v
|
||||
m.DirectMap2M = &val
|
||||
m.DirectMap2MBytes = &valBytes
|
||||
case "DirectMap1G:":
|
||||
m.DirectMap1G = &v
|
||||
m.DirectMap1G = &val
|
||||
m.DirectMap1GBytes = &valBytes
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
26
vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
26
vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
@ -50,10 +50,13 @@ type (
|
||||
// UsedSockets shows the total number of parsed lines representing the
|
||||
// number of used sockets.
|
||||
UsedSockets uint64
|
||||
// Drops shows the total number of dropped packets of all UPD sockets.
|
||||
Drops *uint64
|
||||
}
|
||||
|
||||
// netIPSocketLine represents the fields parsed from a single line
|
||||
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
|
||||
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
|
||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||
netIPSocketLine struct {
|
||||
Sl uint64
|
||||
@ -66,6 +69,7 @@ type (
|
||||
RxQueue uint64
|
||||
UID uint64
|
||||
Inode uint64
|
||||
Drops *uint64
|
||||
}
|
||||
)
|
||||
|
||||
@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
|
||||
defer f.Close()
|
||||
|
||||
var netIPSocket NetIPSocket
|
||||
isUDP := strings.Contains(file, "udp")
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetIPSocketLine(fields)
|
||||
line, err := parseNetIPSocketLine(fields, isUDP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
|
||||
defer f.Close()
|
||||
|
||||
var netIPSocketSummary NetIPSocketSummary
|
||||
var udpPacketDrops uint64
|
||||
isUDP := strings.Contains(file, "udp")
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetIPSocketLine(fields)
|
||||
line, err := parseNetIPSocketLine(fields, isUDP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
netIPSocketSummary.TxQueueLength += line.TxQueue
|
||||
netIPSocketSummary.RxQueueLength += line.RxQueue
|
||||
netIPSocketSummary.UsedSockets++
|
||||
if isUDP {
|
||||
udpPacketDrops += *line.Drops
|
||||
netIPSocketSummary.Drops = &udpPacketDrops
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
@ -149,7 +160,7 @@ func parseIP(hexIP string) (net.IP, error) {
|
||||
}
|
||||
|
||||
// parseNetIPSocketLine parses a single line, represented by a list of fields.
|
||||
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
||||
func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
|
||||
line := &netIPSocketLine{}
|
||||
if len(fields) < 10 {
|
||||
return nil, fmt.Errorf(
|
||||
@ -224,5 +235,14 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
||||
return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
|
||||
}
|
||||
|
||||
// drops
|
||||
if isUDP {
|
||||
drops, err := strconv.ParseUint(fields[12], 0, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
|
||||
}
|
||||
line.Drops = &drops
|
||||
}
|
||||
|
||||
return line, nil
|
||||
}
|
||||
|
||||
119
vendor/github.com/prometheus/procfs/net_tls_stat.go
generated
vendored
Normal file
119
vendor/github.com/prometheus/procfs/net_tls_stat.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
// Copyright 2023 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TLSStat struct represents data in /proc/net/tls_stat.
|
||||
// See https://docs.kernel.org/networking/tls.html#statistics
|
||||
type TLSStat struct {
|
||||
// number of TX sessions currently installed where host handles cryptography
|
||||
TLSCurrTxSw int
|
||||
// number of RX sessions currently installed where host handles cryptography
|
||||
TLSCurrRxSw int
|
||||
// number of TX sessions currently installed where NIC handles cryptography
|
||||
TLSCurrTxDevice int
|
||||
// number of RX sessions currently installed where NIC handles cryptography
|
||||
TLSCurrRxDevice int
|
||||
//number of TX sessions opened with host cryptography
|
||||
TLSTxSw int
|
||||
//number of RX sessions opened with host cryptography
|
||||
TLSRxSw int
|
||||
// number of TX sessions opened with NIC cryptography
|
||||
TLSTxDevice int
|
||||
// number of RX sessions opened with NIC cryptography
|
||||
TLSRxDevice int
|
||||
// record decryption failed (e.g. due to incorrect authentication tag)
|
||||
TLSDecryptError int
|
||||
// number of RX resyncs sent to NICs handling cryptography
|
||||
TLSRxDeviceResync int
|
||||
// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
|
||||
TLSDecryptRetry int
|
||||
// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
|
||||
TLSRxNoPadViolation int
|
||||
}
|
||||
|
||||
// NewTLSStat reads the tls_stat statistics.
|
||||
func NewTLSStat() (TLSStat, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return TLSStat{}, err
|
||||
}
|
||||
|
||||
return fs.NewTLSStat()
|
||||
}
|
||||
|
||||
// NewTLSStat reads the tls_stat statistics.
|
||||
func (fs FS) NewTLSStat() (TLSStat, error) {
|
||||
file, err := os.Open(fs.proc.Path("net/tls_stat"))
|
||||
if err != nil {
|
||||
return TLSStat{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var (
|
||||
tlsstat = TLSStat{}
|
||||
s = bufio.NewScanner(file)
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
|
||||
if len(fields) != 2 {
|
||||
return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
|
||||
}
|
||||
|
||||
name := fields[0]
|
||||
value, err := strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return TLSStat{}, err
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "TlsCurrTxSw":
|
||||
tlsstat.TLSCurrTxSw = value
|
||||
case "TlsCurrRxSw":
|
||||
tlsstat.TLSCurrRxSw = value
|
||||
case "TlsCurrTxDevice":
|
||||
tlsstat.TLSCurrTxDevice = value
|
||||
case "TlsCurrRxDevice":
|
||||
tlsstat.TLSCurrRxDevice = value
|
||||
case "TlsTxSw":
|
||||
tlsstat.TLSTxSw = value
|
||||
case "TlsRxSw":
|
||||
tlsstat.TLSRxSw = value
|
||||
case "TlsTxDevice":
|
||||
tlsstat.TLSTxDevice = value
|
||||
case "TlsRxDevice":
|
||||
tlsstat.TLSRxDevice = value
|
||||
case "TlsDecryptError":
|
||||
tlsstat.TLSDecryptError = value
|
||||
case "TlsRxDeviceResync":
|
||||
tlsstat.TLSRxDeviceResync = value
|
||||
case "TlsDecryptRetry":
|
||||
tlsstat.TLSDecryptRetry = value
|
||||
case "TlsRxNoPadViolation":
|
||||
tlsstat.TLSRxNoPadViolation = value
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return tlsstat, s.Err()
|
||||
}
|
||||
7
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
7
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
@ -110,6 +110,11 @@ type ProcStat struct {
|
||||
Policy uint
|
||||
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
|
||||
DelayAcctBlkIOTicks uint64
|
||||
// Guest time of the process (time spent running a virtual CPU for a guest
|
||||
// operating system), measured in clock ticks.
|
||||
GuestTime int
|
||||
// Guest time of the process's children, measured in clock ticks.
|
||||
CGuestTime int
|
||||
|
||||
proc FS
|
||||
}
|
||||
@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) {
|
||||
&s.RTPriority,
|
||||
&s.Policy,
|
||||
&s.DelayAcctBlkIOTicks,
|
||||
&s.GuestTime,
|
||||
&s.CGuestTime,
|
||||
)
|
||||
if err != nil {
|
||||
return ProcStat{}, err
|
||||
|
||||
10
vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go
generated
vendored
Normal file
10
vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
package cpu
|
||||
|
||||
type cpuTimes struct {
|
||||
User uint64
|
||||
Nice uint64
|
||||
Sys uint64
|
||||
Spin uint64
|
||||
Intr uint64
|
||||
Idle uint64
|
||||
}
|
||||
2
vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd.go
generated
vendored
2
vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd.go
generated
vendored
@ -45,7 +45,7 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro
|
||||
bufSize := sizeOfStatvfs * mountedFsCount
|
||||
buf := make([]Statvfs, mountedFsCount)
|
||||
|
||||
// request agian to get desired mount data
|
||||
// request again to get desired mount data
|
||||
_, _, err = unix.Syscall(
|
||||
483, // SYS___getvfsstat90 syscall
|
||||
uintptr(unsafe.Pointer(&buf[0])),
|
||||
|
||||
40
vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_riscv64.go
generated
vendored
Normal file
40
vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_riscv64.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
//go:build openbsd && riscv64
|
||||
// +build openbsd,riscv64
|
||||
|
||||
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
|
||||
// cgo -godefs disk/types_openbsd.go
|
||||
|
||||
package disk
|
||||
|
||||
const (
|
||||
devstat_NO_DATA = 0x00
|
||||
devstat_READ = 0x01
|
||||
devstat_WRITE = 0x02
|
||||
devstat_FREE = 0x03
|
||||
)
|
||||
|
||||
const (
|
||||
sizeOfDiskstats = 0x70
|
||||
)
|
||||
|
||||
type (
|
||||
Diskstats struct {
|
||||
Name [16]int8
|
||||
Busy int32
|
||||
Rxfer uint64
|
||||
Wxfer uint64
|
||||
Seek uint64
|
||||
Rbytes uint64
|
||||
Wbytes uint64
|
||||
Attachtime Timeval
|
||||
Timestamp Timeval
|
||||
Time Timeval
|
||||
}
|
||||
Timeval struct {
|
||||
Sec int64
|
||||
Usec int64
|
||||
}
|
||||
)
|
||||
|
||||
type Diskstat struct{}
|
||||
type bintime struct{}
|
||||
38
vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go
generated
vendored
Normal file
38
vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
//go:build openbsd && riscv64
|
||||
// +build openbsd,riscv64
|
||||
|
||||
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
|
||||
// cgo -godefs mem/types_openbsd.go
|
||||
|
||||
package mem
|
||||
|
||||
const (
|
||||
CTLVfs = 10
|
||||
VfsGeneric = 0
|
||||
VfsBcacheStat = 3
|
||||
)
|
||||
|
||||
const (
|
||||
sizeOfBcachestats = 0x90
|
||||
)
|
||||
|
||||
type Bcachestats struct {
|
||||
Numbufs int64
|
||||
Numbufpages int64
|
||||
Numdirtypages int64
|
||||
Numcleanpages int64
|
||||
Pendingwrites int64
|
||||
Pendingreads int64
|
||||
Numwrites int64
|
||||
Numreads int64
|
||||
Cachehits int64
|
||||
Busymapped int64
|
||||
Dmapages int64
|
||||
Highpages int64
|
||||
Delwribufs int64
|
||||
Kvaslots int64
|
||||
Avail int64
|
||||
Highflips int64
|
||||
Highflops int64
|
||||
Dmaflips int64
|
||||
}
|
||||
34
vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go
generated
vendored
34
vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go
generated
vendored
@ -1,8 +1,7 @@
|
||||
//go:build freebsd && arm64
|
||||
// +build freebsd,arm64
|
||||
|
||||
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
|
||||
// cgo -godefs process/types_freebsd.go
|
||||
// cgo -godefs types_freebsd.go
|
||||
|
||||
package process
|
||||
|
||||
@ -82,14 +81,14 @@ type Rlimit struct {
|
||||
type KinfoProc struct {
|
||||
Structsize int32
|
||||
Layout int32
|
||||
Args *int64 /* pargs */
|
||||
Paddr *int64 /* proc */
|
||||
Addr *int64 /* user */
|
||||
Tracep *int64 /* vnode */
|
||||
Textvp *int64 /* vnode */
|
||||
Fd *int64 /* filedesc */
|
||||
Vmspace *int64 /* vmspace */
|
||||
Wchan *byte
|
||||
Args int64 /* pargs */
|
||||
Paddr int64 /* proc */
|
||||
Addr int64 /* user */
|
||||
Tracep int64 /* vnode */
|
||||
Textvp int64 /* vnode */
|
||||
Fd int64 /* filedesc */
|
||||
Vmspace int64 /* vmspace */
|
||||
Wchan int64
|
||||
Pid int32
|
||||
Ppid int32
|
||||
Pgid int32
|
||||
@ -140,7 +139,7 @@ type KinfoProc struct {
|
||||
Wmesg [9]uint8
|
||||
Login [18]uint8
|
||||
Lockname [9]uint8
|
||||
Comm [20]int8
|
||||
Comm [20]int8 // changed from uint8 by hand
|
||||
Emul [17]uint8
|
||||
Loginclass [18]uint8
|
||||
Moretdname [4]uint8
|
||||
@ -159,11 +158,12 @@ type KinfoProc struct {
|
||||
Pri Priority
|
||||
Rusage Rusage
|
||||
Rusage_ch Rusage
|
||||
Pcb *int64 /* pcb */
|
||||
Kstack *byte
|
||||
Udata *byte
|
||||
Tdaddr *int64 /* thread */
|
||||
Spareptrs [6]*byte
|
||||
Pcb int64 /* pcb */
|
||||
Kstack int64
|
||||
Udata int64
|
||||
Tdaddr int64 /* thread */
|
||||
Pd int64 /* pwddesc, not accurate */
|
||||
Spareptrs [5]int64
|
||||
Sparelongs [12]int64
|
||||
Sflag int64
|
||||
Tdflags int64
|
||||
@ -195,7 +195,7 @@ type KinfoVmentry struct {
|
||||
Vn_rdev_freebsd11 uint32
|
||||
Vn_mode uint16
|
||||
Status uint16
|
||||
Vn_fsid uint64
|
||||
Type_spec [8]byte
|
||||
Vn_rdev uint64
|
||||
X_kve_ispare [8]int32
|
||||
Path [1024]uint8
|
||||
|
||||
204
vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go
generated
vendored
Normal file
204
vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go
generated
vendored
Normal file
@ -0,0 +1,204 @@
|
||||
//go:build openbsd && riscv64
|
||||
// +build openbsd,riscv64
|
||||
|
||||
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
|
||||
// cgo -godefs process/types_openbsd.go
|
||||
|
||||
package process
|
||||
|
||||
const (
|
||||
CTLKern = 1
|
||||
KernProc = 66
|
||||
KernProcAll = 0
|
||||
KernProcPID = 1
|
||||
KernProcProc = 8
|
||||
KernProcPathname = 12
|
||||
KernProcArgs = 55
|
||||
KernProcArgv = 1
|
||||
KernProcEnv = 3
|
||||
)
|
||||
|
||||
const (
|
||||
ArgMax = 256 * 1024
|
||||
)
|
||||
|
||||
const (
|
||||
sizeofPtr = 0x8
|
||||
sizeofShort = 0x2
|
||||
sizeofInt = 0x4
|
||||
sizeofLong = 0x8
|
||||
sizeofLongLong = 0x8
|
||||
)
|
||||
|
||||
const (
|
||||
sizeOfKinfoVmentry = 0x50
|
||||
sizeOfKinfoProc = 0x288
|
||||
)
|
||||
|
||||
const (
|
||||
SIDL = 1
|
||||
SRUN = 2
|
||||
SSLEEP = 3
|
||||
SSTOP = 4
|
||||
SZOMB = 5
|
||||
SDEAD = 6
|
||||
SONPROC = 7
|
||||
)
|
||||
|
||||
type (
|
||||
_C_short int16
|
||||
_C_int int32
|
||||
_C_long int64
|
||||
_C_long_long int64
|
||||
)
|
||||
|
||||
type Timespec struct {
|
||||
Sec int64
|
||||
Nsec int64
|
||||
}
|
||||
|
||||
type Timeval struct {
|
||||
Sec int64
|
||||
Usec int64
|
||||
}
|
||||
|
||||
type Rusage struct {
|
||||
Utime Timeval
|
||||
Stime Timeval
|
||||
Maxrss int64
|
||||
Ixrss int64
|
||||
Idrss int64
|
||||
Isrss int64
|
||||
Minflt int64
|
||||
Majflt int64
|
||||
Nswap int64
|
||||
Inblock int64
|
||||
Oublock int64
|
||||
Msgsnd int64
|
||||
Msgrcv int64
|
||||
Nsignals int64
|
||||
Nvcsw int64
|
||||
Nivcsw int64
|
||||
}
|
||||
|
||||
type Rlimit struct {
|
||||
Cur uint64
|
||||
Max uint64
|
||||
}
|
||||
|
||||
type KinfoProc struct {
|
||||
Forw uint64
|
||||
Back uint64
|
||||
Paddr uint64
|
||||
Addr uint64
|
||||
Fd uint64
|
||||
Stats uint64
|
||||
Limit uint64
|
||||
Vmspace uint64
|
||||
Sigacts uint64
|
||||
Sess uint64
|
||||
Tsess uint64
|
||||
Ru uint64
|
||||
Eflag int32
|
||||
Exitsig int32
|
||||
Flag int32
|
||||
Pid int32
|
||||
Ppid int32
|
||||
Sid int32
|
||||
X_pgid int32
|
||||
Tpgid int32
|
||||
Uid uint32
|
||||
Ruid uint32
|
||||
Gid uint32
|
||||
Rgid uint32
|
||||
Groups [16]uint32
|
||||
Ngroups int16
|
||||
Jobc int16
|
||||
Tdev uint32
|
||||
Estcpu uint32
|
||||
Rtime_sec uint32
|
||||
Rtime_usec uint32
|
||||
Cpticks int32
|
||||
Pctcpu uint32
|
||||
Swtime uint32
|
||||
Slptime uint32
|
||||
Schedflags int32
|
||||
Uticks uint64
|
||||
Sticks uint64
|
||||
Iticks uint64
|
||||
Tracep uint64
|
||||
Traceflag int32
|
||||
Holdcnt int32
|
||||
Siglist int32
|
||||
Sigmask uint32
|
||||
Sigignore uint32
|
||||
Sigcatch uint32
|
||||
Stat int8
|
||||
Priority uint8
|
||||
Usrpri uint8
|
||||
Nice uint8
|
||||
Xstat uint16
|
||||
Spare uint16
|
||||
Comm [24]int8
|
||||
Wmesg [8]uint8
|
||||
Wchan uint64
|
||||
Login [32]uint8
|
||||
Vm_rssize int32
|
||||
Vm_tsize int32
|
||||
Vm_dsize int32
|
||||
Vm_ssize int32
|
||||
Uvalid int64
|
||||
Ustart_sec uint64
|
||||
Ustart_usec uint32
|
||||
Uutime_sec uint32
|
||||
Uutime_usec uint32
|
||||
Ustime_sec uint32
|
||||
Ustime_usec uint32
|
||||
Uru_maxrss uint64
|
||||
Uru_ixrss uint64
|
||||
Uru_idrss uint64
|
||||
Uru_isrss uint64
|
||||
Uru_minflt uint64
|
||||
Uru_majflt uint64
|
||||
Uru_nswap uint64
|
||||
Uru_inblock uint64
|
||||
Uru_oublock uint64
|
||||
Uru_msgsnd uint64
|
||||
Uru_msgrcv uint64
|
||||
Uru_nsignals uint64
|
||||
Uru_nvcsw uint64
|
||||
Uru_nivcsw uint64
|
||||
Uctime_sec uint32
|
||||
Uctime_usec uint32
|
||||
Psflags uint32
|
||||
Acflag uint32
|
||||
Svuid uint32
|
||||
Svgid uint32
|
||||
Emul [8]uint8
|
||||
Rlim_rss_cur uint64
|
||||
Cpuid uint64
|
||||
Vm_map_size uint64
|
||||
Tid int32
|
||||
Rtableid uint32
|
||||
Pledge uint64
|
||||
Name [24]uint8
|
||||
}
|
||||
|
||||
type Priority struct{}
|
||||
|
||||
type KinfoVmentry struct {
|
||||
Start uint64
|
||||
End uint64
|
||||
Guard uint64
|
||||
Fspace uint64
|
||||
Fspace_augment uint64
|
||||
Offset uint64
|
||||
Wired_count int32
|
||||
Etype int32
|
||||
Protection int32
|
||||
Max_protection int32
|
||||
Advice int32
|
||||
Inheritance int32
|
||||
Flags uint8
|
||||
Pad_cgo_0 [7]byte
|
||||
}
|
||||
4
vendor/github.com/sirupsen/logrus/.gitignore
generated
vendored
4
vendor/github.com/sirupsen/logrus/.gitignore
generated
vendored
@ -1,4 +0,0 @@
|
||||
logrus
|
||||
vendor
|
||||
|
||||
.idea/
|
||||
40
vendor/github.com/sirupsen/logrus/.golangci.yml
generated
vendored
40
vendor/github.com/sirupsen/logrus/.golangci.yml
generated
vendored
@ -1,40 +0,0 @@
|
||||
run:
|
||||
# do not run on test files yet
|
||||
tests: false
|
||||
|
||||
# all available settings of specific linters
|
||||
linters-settings:
|
||||
errcheck:
|
||||
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-type-assertions: false
|
||||
|
||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-blank: false
|
||||
|
||||
lll:
|
||||
line-length: 100
|
||||
tab-width: 4
|
||||
|
||||
prealloc:
|
||||
simple: false
|
||||
range-loops: false
|
||||
for-loops: false
|
||||
|
||||
whitespace:
|
||||
multi-if: false # Enforces newlines (or comments) after every multi-line if statement
|
||||
multi-func: false # Enforces newlines (or comments) after every multi-line function signature
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- megacheck
|
||||
- govet
|
||||
disable:
|
||||
- maligned
|
||||
- prealloc
|
||||
disable-all: false
|
||||
presets:
|
||||
- bugs
|
||||
- unused
|
||||
fast: false
|
||||
15
vendor/github.com/sirupsen/logrus/.travis.yml
generated
vendored
15
vendor/github.com/sirupsen/logrus/.travis.yml
generated
vendored
@ -1,15 +0,0 @@
|
||||
language: go
|
||||
go_import_path: github.com/sirupsen/logrus
|
||||
git:
|
||||
depth: 1
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
go: 1.15.x
|
||||
os: linux
|
||||
install:
|
||||
- ./travis/install.sh
|
||||
script:
|
||||
- cd ci
|
||||
- go run mage.go -v -w ../ crossBuild
|
||||
- go run mage.go -v -w ../ lint
|
||||
- go run mage.go -v -w ../ test
|
||||
259
vendor/github.com/sirupsen/logrus/CHANGELOG.md
generated
vendored
259
vendor/github.com/sirupsen/logrus/CHANGELOG.md
generated
vendored
@ -1,259 +0,0 @@
|
||||
# 1.8.1
|
||||
Code quality:
|
||||
* move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer
|
||||
* improve timestamp format documentation
|
||||
|
||||
Fixes:
|
||||
* fix race condition on logger hooks
|
||||
|
||||
|
||||
# 1.8.0
|
||||
|
||||
Correct versioning number replacing v1.7.1.
|
||||
|
||||
# 1.7.1
|
||||
|
||||
Beware this release has introduced a new public API and its semver is therefore incorrect.
|
||||
|
||||
Code quality:
|
||||
* use go 1.15 in travis
|
||||
* use magefile as task runner
|
||||
|
||||
Fixes:
|
||||
* small fixes about new go 1.13 error formatting system
|
||||
* Fix for long time race condiction with mutating data hooks
|
||||
|
||||
Features:
|
||||
* build support for zos
|
||||
|
||||
# 1.7.0
|
||||
Fixes:
|
||||
* the dependency toward a windows terminal library has been removed
|
||||
|
||||
Features:
|
||||
* a new buffer pool management API has been added
|
||||
* a set of `<LogLevel>Fn()` functions have been added
|
||||
|
||||
# 1.6.0
|
||||
Fixes:
|
||||
* end of line cleanup
|
||||
* revert the entry concurrency bug fix whic leads to deadlock under some circumstances
|
||||
* update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
|
||||
|
||||
Features:
|
||||
* add an option to the `TextFormatter` to completely disable fields quoting
|
||||
|
||||
# 1.5.0
|
||||
Code quality:
|
||||
* add golangci linter run on travis
|
||||
|
||||
Fixes:
|
||||
* add mutex for hooks concurrent access on `Entry` data
|
||||
* caller function field for go1.14
|
||||
* fix build issue for gopherjs target
|
||||
|
||||
Feature:
|
||||
* add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level
|
||||
* add a `DisableHTMLEscape` option in the `JSONFormatter`
|
||||
* add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
|
||||
|
||||
# 1.4.2
|
||||
* Fixes build break for plan9, nacl, solaris
|
||||
# 1.4.1
|
||||
This new release introduces:
|
||||
* Enhance TextFormatter to not print caller information when they are empty (#944)
|
||||
* Remove dependency on golang.org/x/crypto (#932, #943)
|
||||
|
||||
Fixes:
|
||||
* Fix Entry.WithContext method to return a copy of the initial entry (#941)
|
||||
|
||||
# 1.4.0
|
||||
This new release introduces:
|
||||
* Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
|
||||
* Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
|
||||
* Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
|
||||
|
||||
Fixes:
|
||||
* Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
|
||||
* Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
|
||||
* Fix infinite recursion on unknown `Level.String()` (#907)
|
||||
* Fix race condition in `getCaller` (#916).
|
||||
|
||||
|
||||
# 1.3.0
|
||||
This new release introduces:
|
||||
* Log, Logf, Logln functions for Logger and Entry that take a Level
|
||||
|
||||
Fixes:
|
||||
* Building prometheus node_exporter on AIX (#840)
|
||||
* Race condition in TextFormatter (#468)
|
||||
* Travis CI import path (#868)
|
||||
* Remove coloured output on Windows (#862)
|
||||
* Pointer to func as field in JSONFormatter (#870)
|
||||
* Properly marshal Levels (#873)
|
||||
|
||||
# 1.2.0
|
||||
This new release introduces:
|
||||
* A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
|
||||
* A new trace level named `Trace` whose level is below `Debug`
|
||||
* A configurable exit function to be called upon a Fatal trace
|
||||
* The `Level` object now implements `encoding.TextUnmarshaler` interface
|
||||
|
||||
# 1.1.1
|
||||
This is a bug fix release.
|
||||
* fix the build break on Solaris
|
||||
* don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
|
||||
|
||||
# 1.1.0
|
||||
This new release introduces:
|
||||
* several fixes:
|
||||
* a fix for a race condition on entry formatting
|
||||
* proper cleanup of previously used entries before putting them back in the pool
|
||||
* the extra new line at the end of message in text formatter has been removed
|
||||
* a new global public API to check if a level is activated: IsLevelEnabled
|
||||
* the following methods have been added to the Logger object
|
||||
* IsLevelEnabled
|
||||
* SetFormatter
|
||||
* SetOutput
|
||||
* ReplaceHooks
|
||||
* introduction of go module
|
||||
* an indent configuration for the json formatter
|
||||
* output colour support for windows
|
||||
* the field sort function is now configurable for text formatter
|
||||
* the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater
|
||||
|
||||
# 1.0.6
|
||||
|
||||
This new release introduces:
|
||||
* a new api WithTime which allows to easily force the time of the log entry
|
||||
which is mostly useful for logger wrapper
|
||||
* a fix reverting the immutability of the entry given as parameter to the hooks
|
||||
a new configuration field of the json formatter in order to put all the fields
|
||||
in a nested dictionnary
|
||||
* a new SetOutput method in the Logger
|
||||
* a new configuration of the textformatter to configure the name of the default keys
|
||||
* a new configuration of the text formatter to disable the level truncation
|
||||
|
||||
# 1.0.5
|
||||
|
||||
* Fix hooks race (#707)
|
||||
* Fix panic deadlock (#695)
|
||||
|
||||
# 1.0.4
|
||||
|
||||
* Fix race when adding hooks (#612)
|
||||
* Fix terminal check in AppEngine (#635)
|
||||
|
||||
# 1.0.3
|
||||
|
||||
* Replace example files with testable examples
|
||||
|
||||
# 1.0.2
|
||||
|
||||
* bug: quote non-string values in text formatter (#583)
|
||||
* Make (*Logger) SetLevel a public method
|
||||
|
||||
# 1.0.1
|
||||
|
||||
* bug: fix escaping in text formatter (#575)
|
||||
|
||||
# 1.0.0
|
||||
|
||||
* Officially changed name to lower-case
|
||||
* bug: colors on Windows 10 (#541)
|
||||
* bug: fix race in accessing level (#512)
|
||||
|
||||
# 0.11.5
|
||||
|
||||
* feature: add writer and writerlevel to entry (#372)
|
||||
|
||||
# 0.11.4
|
||||
|
||||
* bug: fix undefined variable on solaris (#493)
|
||||
|
||||
# 0.11.3
|
||||
|
||||
* formatter: configure quoting of empty values (#484)
|
||||
* formatter: configure quoting character (default is `"`) (#484)
|
||||
* bug: fix not importing io correctly in non-linux environments (#481)
|
||||
|
||||
# 0.11.2
|
||||
|
||||
* bug: fix windows terminal detection (#476)
|
||||
|
||||
# 0.11.1
|
||||
|
||||
* bug: fix tty detection with custom out (#471)
|
||||
|
||||
# 0.11.0
|
||||
|
||||
* performance: Use bufferpool to allocate (#370)
|
||||
* terminal: terminal detection for app-engine (#343)
|
||||
* feature: exit handler (#375)
|
||||
|
||||
# 0.10.0
|
||||
|
||||
* feature: Add a test hook (#180)
|
||||
* feature: `ParseLevel` is now case-insensitive (#326)
|
||||
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
|
||||
* performance: avoid re-allocations on `WithFields` (#335)
|
||||
|
||||
# 0.9.0
|
||||
|
||||
* logrus/text_formatter: don't emit empty msg
|
||||
* logrus/hooks/airbrake: move out of main repository
|
||||
* logrus/hooks/sentry: move out of main repository
|
||||
* logrus/hooks/papertrail: move out of main repository
|
||||
* logrus/hooks/bugsnag: move out of main repository
|
||||
* logrus/core: run tests with `-race`
|
||||
* logrus/core: detect TTY based on `stderr`
|
||||
* logrus/core: support `WithError` on logger
|
||||
* logrus/core: Solaris support
|
||||
|
||||
# 0.8.7
|
||||
|
||||
* logrus/core: fix possible race (#216)
|
||||
* logrus/doc: small typo fixes and doc improvements
|
||||
|
||||
|
||||
# 0.8.6
|
||||
|
||||
* hooks/raven: allow passing an initialized client
|
||||
|
||||
# 0.8.5
|
||||
|
||||
* logrus/core: revert #208
|
||||
|
||||
# 0.8.4
|
||||
|
||||
* formatter/text: fix data race (#218)
|
||||
|
||||
# 0.8.3
|
||||
|
||||
* logrus/core: fix entry log level (#208)
|
||||
* logrus/core: improve performance of text formatter by 40%
|
||||
* logrus/core: expose `LevelHooks` type
|
||||
* logrus/core: add support for DragonflyBSD and NetBSD
|
||||
* formatter/text: print structs more verbosely
|
||||
|
||||
# 0.8.2
|
||||
|
||||
* logrus: fix more Fatal family functions
|
||||
|
||||
# 0.8.1
|
||||
|
||||
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
||||
|
||||
# 0.8.0
|
||||
|
||||
* logrus: defaults to stderr instead of stdout
|
||||
* hooks/sentry: add special field for `*http.Request`
|
||||
* formatter/text: ignore Windows for colors
|
||||
|
||||
# 0.7.3
|
||||
|
||||
* formatter/\*: allow configuration of timestamp layout
|
||||
|
||||
# 0.7.2
|
||||
|
||||
* formatter/text: Add configuration option for time format (#158)
|
||||
21
vendor/github.com/sirupsen/logrus/LICENSE
generated
vendored
21
vendor/github.com/sirupsen/logrus/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Simon Eskildsen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
515
vendor/github.com/sirupsen/logrus/README.md
generated
vendored
515
vendor/github.com/sirupsen/logrus/README.md
generated
vendored
@ -1,515 +0,0 @@
|
||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [](https://travis-ci.org/sirupsen/logrus) [](https://pkg.go.dev/github.com/sirupsen/logrus)
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger.
|
||||
|
||||
**Logrus is in maintenance-mode.** We will not be introducing new features. It's
|
||||
simply too hard to do in a way that won't break many people's projects, which is
|
||||
the last thing you want from your Logging library (again...).
|
||||
|
||||
This does not mean Logrus is dead. Logrus will continue to be maintained for
|
||||
security, (backwards compatible) bug fixes, and performance (where we are
|
||||
limited by the interface).
|
||||
|
||||
I believe Logrus' biggest contribution is to have played a part in today's
|
||||
widespread use of structured logging in Golang. There doesn't seem to be a
|
||||
reason to do a major, breaking iteration into Logrus V2, since the fantastic Go
|
||||
community has built those independently. Many fantastic alternatives have sprung
|
||||
up. Logrus would look like those, had it been re-designed with what we know
|
||||
about structured logging in Go today. Check out, for example,
|
||||
[Zerolog][zerolog], [Zap][zap], and [Apex][apex].
|
||||
|
||||
[zerolog]: https://github.com/rs/zerolog
|
||||
[zap]: https://github.com/uber-go/zap
|
||||
[apex]: https://github.com/apex/log
|
||||
|
||||
**Seeing weird case-sensitive problems?** It's in the past been possible to
|
||||
import Logrus as both upper- and lower-case. Due to the Go package environment,
|
||||
this caused issues in the community and we needed a standard. Some environments
|
||||
experienced problems with the upper-case variant, so the lower-case was decided.
|
||||
Everything using `logrus` will need to use the lower-case:
|
||||
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
|
||||
|
||||
To fix Glide, see [these
|
||||
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
|
||||
For an in-depth explanation of the casing issue, see [this
|
||||
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
|
||||

|
||||
|
||||
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
|
||||
or Splunk:
|
||||
|
||||
```text
|
||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
|
||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||
|
||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||
```
|
||||
|
||||
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
|
||||
attached, the output is compatible with the
|
||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||
|
||||
```text
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||
```
|
||||
To ensure this behaviour even if a TTY is attached, set your formatter as follows:
|
||||
|
||||
```go
|
||||
log.SetFormatter(&log.TextFormatter{
|
||||
DisableColors: true,
|
||||
FullTimestamp: true,
|
||||
})
|
||||
```
|
||||
|
||||
#### Logging Method Name
|
||||
|
||||
If you wish to add the calling method as a field, instruct the logger via:
|
||||
```go
|
||||
log.SetReportCaller(true)
|
||||
```
|
||||
This adds the caller as 'method' like so:
|
||||
|
||||
```json
|
||||
{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
|
||||
"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
|
||||
```
|
||||
|
||||
```text
|
||||
time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
|
||||
```
|
||||
Note that this does add measurable overhead - the cost will depend on the version of Go, but is
|
||||
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
|
||||
environment via benchmarks:
|
||||
```
|
||||
go test -bench=.*CallerTracing
|
||||
```
|
||||
|
||||
|
||||
#### Case-sensitivity
|
||||
|
||||
The organization's name was changed to lower-case--and this will not be changed
|
||||
back. If you are getting import conflicts due to case sensitivity, please use
|
||||
the lower-case import: `github.com/sirupsen/logrus`.
|
||||
|
||||
#### Example
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
```
|
||||
|
||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Log as JSON instead of the default ASCII formatter.
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
|
||||
// Output to stdout instead of the default stderr
|
||||
// Can be any io.Writer, see below for File example
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
// Only log the warning severity or above.
|
||||
log.SetLevel(log.WarnLevel)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
|
||||
// A common pattern is to re-use fields between logging statements by re-using
|
||||
// the logrus.Entry returned from WithFields()
|
||||
contextLogger := log.WithFields(log.Fields{
|
||||
"common": "this is a common field",
|
||||
"other": "I also should be logged always",
|
||||
})
|
||||
|
||||
contextLogger.Info("I'll be logged with common and other field")
|
||||
contextLogger.Info("Me too")
|
||||
}
|
||||
```
|
||||
|
||||
For more advanced usage such as logging to multiple locations from the same
|
||||
application, you can also create an instance of the `logrus` Logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
var log = logrus.New()
|
||||
|
||||
func main() {
|
||||
// The API for setting attributes is a little different than the package level
|
||||
// exported logger. See Godoc.
|
||||
log.Out = os.Stdout
|
||||
|
||||
// You could set this to any `io.Writer` such as a file
|
||||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
|
||||
// if err == nil {
|
||||
// log.Out = file
|
||||
// } else {
|
||||
// log.Info("Failed to log to file, using default stderr")
|
||||
// }
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
}
|
||||
```
|
||||
|
||||
#### Fields
|
||||
|
||||
Logrus encourages careful, structured logging through logging fields instead of
|
||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||
to send event %s to topic %s with key %d")`, you should log the much more
|
||||
discoverable:
|
||||
|
||||
```go
|
||||
log.WithFields(log.Fields{
|
||||
"event": event,
|
||||
"topic": topic,
|
||||
"key": key,
|
||||
}).Fatal("Failed to send event")
|
||||
```
|
||||
|
||||
We've found this API forces you to think about logging in a way that produces
|
||||
much more useful logging messages. We've been in countless situations where just
|
||||
a single added field to a log statement that was already there would've saved us
|
||||
hours. The `WithFields` call is optional.
|
||||
|
||||
In general, with Logrus using any of the `printf`-family functions should be
|
||||
seen as a hint you should add a field, however, you can still use the
|
||||
`printf`-family functions with Logrus.
|
||||
|
||||
#### Default Fields
|
||||
|
||||
Often it's helpful to have fields _always_ attached to log statements in an
|
||||
application or parts of one. For example, you may want to always log the
|
||||
`request_id` and `user_ip` in the context of a request. Instead of writing
|
||||
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
|
||||
every line, you can create a `logrus.Entry` to pass around instead:
|
||||
|
||||
```go
|
||||
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
|
||||
requestLogger.Info("something happened on that request") # will log request_id and user_ip
|
||||
requestLogger.Warn("something not great happened")
|
||||
```
|
||||
|
||||
#### Hooks
|
||||
|
||||
You can add hooks for logging levels. For example to send errors to an exception
|
||||
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
||||
multiple places simultaneously, e.g. syslog.
|
||||
|
||||
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||
`init`:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
|
||||
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
||||
|
||||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
if err != nil {
|
||||
log.Error("Unable to connect to local syslog daemon")
|
||||
} else {
|
||||
log.AddHook(hook)
|
||||
}
|
||||
}
|
||||
```
|
||||
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
|
||||
|
||||
A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
|
||||
|
||||
|
||||
#### Level logging
|
||||
|
||||
Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
|
||||
|
||||
```go
|
||||
log.Trace("Something very low level.")
|
||||
log.Debug("Useful debugging information.")
|
||||
log.Info("Something noteworthy happened!")
|
||||
log.Warn("You should probably take a look at this.")
|
||||
log.Error("Something failed but I'm not quitting.")
|
||||
// Calls os.Exit(1) after logging
|
||||
log.Fatal("Bye.")
|
||||
// Calls panic() after logging
|
||||
log.Panic("I'm bailing.")
|
||||
```
|
||||
|
||||
You can set the logging level on a `Logger`, then it will only log entries with
|
||||
that severity or anything above it:
|
||||
|
||||
```go
|
||||
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||
log.SetLevel(log.InfoLevel)
|
||||
```
|
||||
|
||||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||
environment if your application has that.
|
||||
|
||||
Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
|
||||
|
||||
#### Entries
|
||||
|
||||
Besides the fields added with `WithField` or `WithFields` some fields are
|
||||
automatically added to all logging events:
|
||||
|
||||
1. `time`. The timestamp when the entry was created.
|
||||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||
the `AddFields` call. E.g. `Failed to send event.`
|
||||
3. `level`. The logging level. E.g. `info`.
|
||||
|
||||
#### Environments
|
||||
|
||||
Logrus has no notion of environment.
|
||||
|
||||
If you wish for hooks and formatters to only be used in specific environments,
|
||||
you should handle that yourself. For example, if your application has a global
|
||||
variable `Environment`, which is a string representation of the environment you
|
||||
could do:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// do something here to set environment depending on an environment variable
|
||||
// or command-line flag
|
||||
if Environment == "production" {
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
} else {
|
||||
// The TextFormatter is default, you don't actually have to do this.
|
||||
log.SetFormatter(&log.TextFormatter{})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This configuration is how `logrus` was intended to be used, but JSON in
|
||||
production is mostly only useful if you do log aggregation with tools like
|
||||
Splunk or Logstash.
|
||||
|
||||
#### Formatters
|
||||
|
||||
The built-in logging formatters are:
|
||||
|
||||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||
without colors.
|
||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||
field to `true`. To force no colored output even if there is a TTY set the
|
||||
`DisableColors` field to `true`. For Windows, see
|
||||
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
|
||||
* When colors are enabled, levels are truncated to 4 characters by default. To disable
|
||||
truncation set the `DisableLevelTruncation` field to `true`.
|
||||
* When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
|
||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
|
||||
|
||||
Third party logging formatters:
|
||||
|
||||
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
|
||||
* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
|
||||
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
|
||||
* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
|
||||
* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files.
|
||||
* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added.
|
||||
|
||||
You can define your formatter by implementing the `Formatter` interface,
|
||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||
default ones (see Entries section above):
|
||||
|
||||
```go
|
||||
type MyJSONFormatter struct {
|
||||
}
|
||||
|
||||
log.SetFormatter(new(MyJSONFormatter))
|
||||
|
||||
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
// Note this doesn't include Time, Level and Message which are available on
|
||||
// the Entry. Consult `godoc` on information about those fields or read the
|
||||
// source of the official loggers.
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
```
|
||||
|
||||
#### Logger as an `io.Writer`
|
||||
|
||||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||
|
||||
```go
|
||||
w := logger.Writer()
|
||||
defer w.Close()
|
||||
|
||||
srv := http.Server{
|
||||
// create a stdlib log.Logger that writes to
|
||||
// logrus.Logger.
|
||||
ErrorLog: log.New(w, "", 0),
|
||||
}
|
||||
```
|
||||
|
||||
Each line written to that writer will be printed the usual way, using formatters
|
||||
and hooks. The level for those entries is `info`.
|
||||
|
||||
This means that we can override the standard library logger easily:
|
||||
|
||||
```go
|
||||
logger := logrus.New()
|
||||
logger.Formatter = &logrus.JSONFormatter{}
|
||||
|
||||
// Use logrus for standard log output
|
||||
// Note that `log` here references stdlib's log
|
||||
// Not logrus imported under the name `log`.
|
||||
log.SetOutput(logger.Writer())
|
||||
```
|
||||
|
||||
#### Rotation
|
||||
|
||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||
external program (like `logrotate(8)`) that can compress and delete old log
|
||||
entries. It should not be a feature of the application-level logger.
|
||||
|
||||
#### Tools
|
||||
|
||||
| Tool | Description |
|
||||
| ---- | ----------- |
|
||||
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.|
|
||||
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
|
||||
|
||||
#### Testing
|
||||
|
||||
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
|
||||
|
||||
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook
|
||||
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
|
||||
|
||||
```go
|
||||
import(
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSomething(t*testing.T){
|
||||
logger, hook := test.NewNullLogger()
|
||||
logger.Error("Helloerror")
|
||||
|
||||
assert.Equal(t, 1, len(hook.Entries))
|
||||
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal(t, "Helloerror", hook.LastEntry().Message)
|
||||
|
||||
hook.Reset()
|
||||
assert.Nil(t, hook.LastEntry())
|
||||
}
|
||||
```
|
||||
|
||||
#### Fatal handlers
|
||||
|
||||
Logrus can register one or more functions that will be called when any `fatal`
|
||||
level message is logged. The registered handlers will be executed before
|
||||
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
|
||||
to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
|
||||
|
||||
```
|
||||
...
|
||||
handler := func() {
|
||||
// gracefully shutdown something...
|
||||
}
|
||||
logrus.RegisterExitHandler(handler)
|
||||
...
|
||||
```
|
||||
|
||||
#### Thread safety
|
||||
|
||||
By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
|
||||
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
|
||||
|
||||
Situation when locking is not needed includes:
|
||||
|
||||
* You have no hooks registered, or hooks calling is already thread-safe.
|
||||
|
||||
* Writing to logger.Out is already thread-safe, for example:
|
||||
|
||||
1) logger.Out is protected by locks.
|
||||
|
||||
2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing)
|
||||
|
||||
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
|
||||
76
vendor/github.com/sirupsen/logrus/alt_exit.go
generated
vendored
76
vendor/github.com/sirupsen/logrus/alt_exit.go
generated
vendored
@ -1,76 +0,0 @@
|
||||
package logrus
|
||||
|
||||
// The following code was sourced and modified from the
|
||||
// https://github.com/tebeka/atexit package governed by the following license:
|
||||
//
|
||||
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
// subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
var handlers = []func(){}
|
||||
|
||||
func runHandler(handler func()) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
handler()
|
||||
}
|
||||
|
||||
func runHandlers() {
|
||||
for _, handler := range handlers {
|
||||
runHandler(handler)
|
||||
}
|
||||
}
|
||||
|
||||
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
|
||||
func Exit(code int) {
|
||||
runHandlers()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
|
||||
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
|
||||
// any Fatal log entry is made.
|
||||
//
|
||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||
// message but also needs to gracefully shutdown. An example usecase could be
|
||||
// closing database connections, or sending a alert that the application is
|
||||
// closing.
|
||||
func RegisterExitHandler(handler func()) {
|
||||
handlers = append(handlers, handler)
|
||||
}
|
||||
|
||||
// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
|
||||
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
|
||||
// any Fatal log entry is made.
|
||||
//
|
||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||
// message but also needs to gracefully shutdown. An example usecase could be
|
||||
// closing database connections, or sending a alert that the application is
|
||||
// closing.
|
||||
func DeferExitHandler(handler func()) {
|
||||
handlers = append([]func(){handler}, handlers...)
|
||||
}
|
||||
14
vendor/github.com/sirupsen/logrus/appveyor.yml
generated
vendored
14
vendor/github.com/sirupsen/logrus/appveyor.yml
generated
vendored
@ -1,14 +0,0 @@
|
||||
version: "{build}"
|
||||
platform: x64
|
||||
clone_folder: c:\gopath\src\github.com\sirupsen\logrus
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
install:
|
||||
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
|
||||
- go version
|
||||
build_script:
|
||||
- go get -t
|
||||
- go test
|
||||
43
vendor/github.com/sirupsen/logrus/buffer_pool.go
generated
vendored
43
vendor/github.com/sirupsen/logrus/buffer_pool.go
generated
vendored
@ -1,43 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
bufferPool BufferPool
|
||||
)
|
||||
|
||||
type BufferPool interface {
|
||||
Put(*bytes.Buffer)
|
||||
Get() *bytes.Buffer
|
||||
}
|
||||
|
||||
type defaultPool struct {
|
||||
pool *sync.Pool
|
||||
}
|
||||
|
||||
func (p *defaultPool) Put(buf *bytes.Buffer) {
|
||||
p.pool.Put(buf)
|
||||
}
|
||||
|
||||
func (p *defaultPool) Get() *bytes.Buffer {
|
||||
return p.pool.Get().(*bytes.Buffer)
|
||||
}
|
||||
|
||||
// SetBufferPool allows to replace the default logrus buffer pool
|
||||
// to better meets the specific needs of an application.
|
||||
func SetBufferPool(bp BufferPool) {
|
||||
bufferPool = bp
|
||||
}
|
||||
|
||||
func init() {
|
||||
SetBufferPool(&defaultPool{
|
||||
pool: &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
26
vendor/github.com/sirupsen/logrus/doc.go
generated
vendored
26
vendor/github.com/sirupsen/logrus/doc.go
generated
vendored
@ -1,26 +0,0 @@
|
||||
/*
|
||||
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
||||
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 1,
|
||||
"size": 10,
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
|
||||
Output:
|
||||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||
|
||||
For a full guide visit https://github.com/sirupsen/logrus
|
||||
*/
|
||||
package logrus
|
||||
442
vendor/github.com/sirupsen/logrus/entry.go
generated
vendored
442
vendor/github.com/sirupsen/logrus/entry.go
generated
vendored
@ -1,442 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
// qualified package name, cached at first use
|
||||
logrusPackage string
|
||||
|
||||
// Positions in the call stack when tracing to report the calling method
|
||||
minimumCallerDepth int
|
||||
|
||||
// Used for caller information initialisation
|
||||
callerInitOnce sync.Once
|
||||
)
|
||||
|
||||
const (
|
||||
maximumCallerDepth int = 25
|
||||
knownLogrusFrames int = 4
|
||||
)
|
||||
|
||||
func init() {
|
||||
// start at the bottom of the stack before the package-name cache is primed
|
||||
minimumCallerDepth = 1
|
||||
}
|
||||
|
||||
// Defines the key when adding errors using WithError.
|
||||
var ErrorKey = "error"
|
||||
|
||||
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||
// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
|
||||
// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
|
||||
// reused and passed around as much as you wish to avoid field duplication.
|
||||
type Entry struct {
|
||||
Logger *Logger
|
||||
|
||||
// Contains all the fields set by the user.
|
||||
Data Fields
|
||||
|
||||
// Time at which the log entry was created
|
||||
Time time.Time
|
||||
|
||||
// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
|
||||
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
|
||||
Level Level
|
||||
|
||||
// Calling method, with package name
|
||||
Caller *runtime.Frame
|
||||
|
||||
// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
|
||||
Message string
|
||||
|
||||
// When formatter is called in entry.log(), a Buffer may be set to entry
|
||||
Buffer *bytes.Buffer
|
||||
|
||||
// Contains the context set by the user. Useful for hook processing etc.
|
||||
Context context.Context
|
||||
|
||||
// err may contain a field formatting error
|
||||
err string
|
||||
}
|
||||
|
||||
func NewEntry(logger *Logger) *Entry {
|
||||
return &Entry{
|
||||
Logger: logger,
|
||||
// Default is three fields, plus one optional. Give a little extra room.
|
||||
Data: make(Fields, 6),
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Dup() *Entry {
|
||||
data := make(Fields, len(entry.Data))
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
|
||||
}
|
||||
|
||||
// Returns the bytes representation of this entry from the formatter.
|
||||
func (entry *Entry) Bytes() ([]byte, error) {
|
||||
return entry.Logger.Formatter.Format(entry)
|
||||
}
|
||||
|
||||
// Returns the string representation from the reader and ultimately the
|
||||
// formatter.
|
||||
func (entry *Entry) String() (string, error) {
|
||||
serialized, err := entry.Bytes()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
str := string(serialized)
|
||||
return str, nil
|
||||
}
|
||||
|
||||
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
|
||||
func (entry *Entry) WithError(err error) *Entry {
|
||||
return entry.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// Add a context to the Entry.
|
||||
func (entry *Entry) WithContext(ctx context.Context) *Entry {
|
||||
dataCopy := make(Fields, len(entry.Data))
|
||||
for k, v := range entry.Data {
|
||||
dataCopy[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
|
||||
}
|
||||
|
||||
// Add a single field to the Entry.
|
||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||
return entry.WithFields(Fields{key: value})
|
||||
}
|
||||
|
||||
// Add a map of fields to the Entry.
|
||||
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||
data := make(Fields, len(entry.Data)+len(fields))
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
fieldErr := entry.err
|
||||
for k, v := range fields {
|
||||
isErrField := false
|
||||
if t := reflect.TypeOf(v); t != nil {
|
||||
switch {
|
||||
case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func:
|
||||
isErrField = true
|
||||
}
|
||||
}
|
||||
if isErrField {
|
||||
tmp := fmt.Sprintf("can not add field %q", k)
|
||||
if fieldErr != "" {
|
||||
fieldErr = entry.err + ", " + tmp
|
||||
} else {
|
||||
fieldErr = tmp
|
||||
}
|
||||
} else {
|
||||
data[k] = v
|
||||
}
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
|
||||
}
|
||||
|
||||
// Overrides the time of the Entry.
|
||||
func (entry *Entry) WithTime(t time.Time) *Entry {
|
||||
dataCopy := make(Fields, len(entry.Data))
|
||||
for k, v := range entry.Data {
|
||||
dataCopy[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
|
||||
}
|
||||
|
||||
// getPackageName reduces a fully qualified function name to the package name
|
||||
// There really ought to be to be a better way...
|
||||
func getPackageName(f string) string {
|
||||
for {
|
||||
lastPeriod := strings.LastIndex(f, ".")
|
||||
lastSlash := strings.LastIndex(f, "/")
|
||||
if lastPeriod > lastSlash {
|
||||
f = f[:lastPeriod]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// getCaller retrieves the name of the first non-logrus calling function
|
||||
func getCaller() *runtime.Frame {
|
||||
// cache this package's fully-qualified name
|
||||
callerInitOnce.Do(func() {
|
||||
pcs := make([]uintptr, maximumCallerDepth)
|
||||
_ = runtime.Callers(0, pcs)
|
||||
|
||||
// dynamic get the package name and the minimum caller depth
|
||||
for i := 0; i < maximumCallerDepth; i++ {
|
||||
funcName := runtime.FuncForPC(pcs[i]).Name()
|
||||
if strings.Contains(funcName, "getCaller") {
|
||||
logrusPackage = getPackageName(funcName)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
minimumCallerDepth = knownLogrusFrames
|
||||
})
|
||||
|
||||
// Restrict the lookback frames to avoid runaway lookups
|
||||
pcs := make([]uintptr, maximumCallerDepth)
|
||||
depth := runtime.Callers(minimumCallerDepth, pcs)
|
||||
frames := runtime.CallersFrames(pcs[:depth])
|
||||
|
||||
for f, again := frames.Next(); again; f, again = frames.Next() {
|
||||
pkg := getPackageName(f.Function)
|
||||
|
||||
// If the caller isn't part of this package, we're done
|
||||
if pkg != logrusPackage {
|
||||
return &f //nolint:scopelint
|
||||
}
|
||||
}
|
||||
|
||||
// if we got here, we failed to find the caller's context
|
||||
return nil
|
||||
}
|
||||
|
||||
func (entry Entry) HasCaller() (has bool) {
|
||||
return entry.Logger != nil &&
|
||||
entry.Logger.ReportCaller &&
|
||||
entry.Caller != nil
|
||||
}
|
||||
|
||||
func (entry *Entry) log(level Level, msg string) {
|
||||
var buffer *bytes.Buffer
|
||||
|
||||
newEntry := entry.Dup()
|
||||
|
||||
if newEntry.Time.IsZero() {
|
||||
newEntry.Time = time.Now()
|
||||
}
|
||||
|
||||
newEntry.Level = level
|
||||
newEntry.Message = msg
|
||||
|
||||
newEntry.Logger.mu.Lock()
|
||||
reportCaller := newEntry.Logger.ReportCaller
|
||||
bufPool := newEntry.getBufferPool()
|
||||
newEntry.Logger.mu.Unlock()
|
||||
|
||||
if reportCaller {
|
||||
newEntry.Caller = getCaller()
|
||||
}
|
||||
|
||||
newEntry.fireHooks()
|
||||
buffer = bufPool.Get()
|
||||
defer func() {
|
||||
newEntry.Buffer = nil
|
||||
buffer.Reset()
|
||||
bufPool.Put(buffer)
|
||||
}()
|
||||
buffer.Reset()
|
||||
newEntry.Buffer = buffer
|
||||
|
||||
newEntry.write()
|
||||
|
||||
newEntry.Buffer = nil
|
||||
|
||||
// To avoid Entry#log() returning a value that only would make sense for
|
||||
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||
// directly here.
|
||||
if level <= PanicLevel {
|
||||
panic(newEntry)
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) getBufferPool() (pool BufferPool) {
|
||||
if entry.Logger.BufferPool != nil {
|
||||
return entry.Logger.BufferPool
|
||||
}
|
||||
return bufferPool
|
||||
}
|
||||
|
||||
func (entry *Entry) fireHooks() {
|
||||
var tmpHooks LevelHooks
|
||||
entry.Logger.mu.Lock()
|
||||
tmpHooks = make(LevelHooks, len(entry.Logger.Hooks))
|
||||
for k, v := range entry.Logger.Hooks {
|
||||
tmpHooks[k] = v
|
||||
}
|
||||
entry.Logger.mu.Unlock()
|
||||
|
||||
err := tmpHooks.Fire(entry.Level, entry)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) write() {
|
||||
entry.Logger.mu.Lock()
|
||||
defer entry.Logger.mu.Unlock()
|
||||
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
||||
return
|
||||
}
|
||||
if _, err := entry.Logger.Out.Write(serialized); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Log will log a message at the level given as parameter.
|
||||
// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit.
|
||||
// For this behaviour Entry.Panic or Entry.Fatal should be used instead.
|
||||
func (entry *Entry) Log(level Level, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.log(level, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Trace(args ...interface{}) {
|
||||
entry.Log(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
entry.Log(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
entry.Info(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
entry.Log(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
entry.Log(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warning(args ...interface{}) {
|
||||
entry.Warn(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
entry.Log(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
entry.Log(FatalLevel, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
entry.Log(PanicLevel, args...)
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.Log(level, fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Tracef(format string, args ...interface{}) {
|
||||
entry.Logf(TraceLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
entry.Logf(DebugLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
entry.Logf(InfoLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
entry.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
entry.Logf(WarnLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
entry.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
entry.Logf(ErrorLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
entry.Logf(FatalLevel, format, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
entry.Logf(PanicLevel, format, args...)
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Logln(level Level, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.Log(level, entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Traceln(args ...interface{}) {
|
||||
entry.Logln(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
entry.Logln(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
entry.Logln(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
entry.Infoln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
entry.Logln(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
entry.Warnln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
entry.Logln(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
entry.Logln(FatalLevel, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
entry.Logln(PanicLevel, args...)
|
||||
}
|
||||
|
||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||
// fmt.Sprintln where spaces are always added between operands, regardless of
|
||||
// their type. Instead of vendoring the Sprintln implementation to spare a
|
||||
// string allocation, we do the simplest thing.
|
||||
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
||||
msg := fmt.Sprintln(args...)
|
||||
return msg[:len(msg)-1]
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user